  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include "drizzled/server_includes.h"
#include "drizzled/field.h"
#include "drizzled/field/blob.h"
#include "drizzled/field/timestamp.h"
#include "plugin/myisam/myisam.h"
#include "drizzled/table.h"
#include "drizzled/session.h"
#include <mysys/my_dir.h>

#include "ha_archive.h"

static const string engine_name("ARCHIVE");
/*
  First, if you want to understand storage engines you should look at
  ha_example.cc and ha_example.h.

  This example was written as a test case for a customer who needed
  a storage engine without indexes that could compress data very well.
  So, welcome to a completely compressed storage engine. This storage
  engine only does inserts. No replace, deletes, or updates. All reads are
  complete table scans. Compression is done through a combination of packing
  and making use of the zlib library.

  We keep a file pointer open for each instance of ha_archive for each read
  but for writes we keep one open file handle just for that. We flush it
  only if we have a read occur. azip handles compressing lots of records
  the same time since we would want to flush).

  A "meta" file is kept alongside the data file. This file serves two purposes.
  The first purpose is to track the number of rows in the table. The second
  purpose is to determine if the table was closed properly or not. When the
  meta file is first opened it is marked as dirty. It is opened when the table
  itself is opened for writing. When the table is closed the new count of rows
  is written to the meta file and the file is marked as clean. If the meta file
  is opened and it is marked as dirty, it is assumed that a crash occurred. At
  this point an error occurs and the user is told to rebuild the file.
  A rebuild scans the rows and rewrites the meta file. If corruption is found
  in the data file then the meta file is not repaired.

  At some point a recovery method for such a drastic case needs to be devised.

  Locks are row level, and you will get a consistent read.

  For performance as far as table scans go it is quite fast. I don't have
  good numbers but locally it has outperformed both InnoDB and MyISAM. For
  InnoDB the question will be whether the table can fit into the buffer
  pool. For MyISAM it is a question of how much the file system caches the
  MyISAM file. With enough free memory MyISAM is faster. It is only when the
  OS doesn't have enough memory to cache the entire table that archive turns
  out to be any faster.

  Examples between MyISAM (packed) and Archive.
*/
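/*
  A minimal sketch of the dirty-flag protocol described above, assuming a
  standalone meta structure; the names archive_meta_t and meta_* are
  illustrative only. The real engine keeps this state in the compressed
  stream's header via azio rather than in a separate struct.
*/
#include <cstdint>

struct archive_meta_t
{
  uint64_t rows;    /* row count recorded at the last clean close */
  uint8_t  dirty;   /* set while open for write, cleared on clean close */
};

/* Opening for write marks the file dirty so a crash leaves evidence. */
static void meta_open_for_write(archive_meta_t *meta)
{
  meta->dirty= 1;
}

/* A clean close records the final row count and clears the flag. */
static void meta_close_clean(archive_meta_t *meta, uint64_t rows)
{
  meta->rows= rows;
  meta->dirty= 0;
}

/* A dirty flag seen at open means the last writer crashed: ask for a rebuild. */
static bool meta_needs_rebuild(const archive_meta_t *meta)
{
  return meta->dirty != 0;
}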
/* Variables for archive share methods */
pthread_mutex_t archive_mutex= PTHREAD_MUTEX_INITIALIZER;

std::map<const char *, ArchiveShare *> archive_open_tables;

static unsigned int global_version;

/* The file extensions */
#define ARZ ".ARZ"               // The data file
#define ARN ".ARN"               // Files used during an optimize call
/* unsigned char + unsigned char */
#define DATA_BUFFER_SIZE 2       // Size of the data used in the data file
#define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption
static bool archive_use_aio= false;

#define ARCHIVE_ROW_HEADER_SIZE 4
/*
  We just implement one additional file extension.
*/
static const char *ha_archive_exts[] = {
class ArchiveTableNameIterator: public TableNameIteratorImplementation
  uint32_t current_entry;

  ArchiveTableNameIterator(const std::string &database)
    : TableNameIteratorImplementation(database), dirp(NULL), current_entry(-1)
  ~ArchiveTableNameIterator();
  int next(std::string *name);

ArchiveTableNameIterator::~ArchiveTableNameIterator()

int ArchiveTableNameIterator::next(string *name)
  char uname[NAME_LEN + 1];
  uint32_t file_name_len;
  const char *wild= NULL;
  char path[FN_REFLEN];

  build_table_filename(path, sizeof(path), db.c_str(), "", false);
  dirp= my_dir(path, MYF(dir ? MY_WANT_STAT : 0));
  if (my_errno == ENOENT)
    my_error(ER_BAD_DB_ERROR, MYF(ME_BELL+ME_WAITTANG), db.c_str());
  my_error(ER_CANT_READ_DIR, MYF(ME_BELL+ME_WAITTANG), path, my_errno);

  if (current_entry == dirp->number_off_files)
  file= dirp->dir_entry + current_entry;
  if (my_strcasecmp(system_charset_info, ext=strchr(file->name, '.'), ARZ) ||
      is_prefix(file->name, TMP_FILE_PREFIX))
  file_name_len= filename_to_tablename(file->name, uname, sizeof(uname));
  uname[file_name_len]= '\0';
  if (wild && wild_compare(uname, wild, 0))
class ArchiveEngine : public StorageEngine
  ArchiveEngine(const string &name_arg) : StorageEngine(name_arg,
    | HTON_HAS_DATA_DICTIONARY

  virtual handler *create(TableShare *table,
    return new (mem_root) ha_archive(this, table);

  const char **bas_ext() const {
    return ha_archive_exts;

  int createTableImplementation(Session *session, const char *table_name,
                                Table *table_arg, HA_CREATE_INFO *create_info,
                                drizzled::message::Table* proto);

  int getTableProtoImplementation(const char* path,
                                  drizzled::message::Table *table_proto);

  TableNameIteratorImplementation* tableNameIterator(const std::string &database)
    return new ArchiveTableNameIterator(database);
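/*
  A sketch of how the iterator above might be driven. It assumes next()
  returns 0 while table names remain and nonzero once the directory scan is
  exhausted; that matches the fragments shown but is not spelled out in this
  excerpt. The helper name list_archive_tables() is hypothetical.
*/
static void list_archive_tables(const std::string &database)
{
  ArchiveTableNameIterator iter(database);
  std::string name;

  while (iter.next(&name) == 0)
  {
    /* name holds a decoded table name (see filename_to_tablename above). */
  }
}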
int ArchiveEngine::getTableProtoImplementation(const char* path,
                                               drizzled::message::Table *table_proto)
  struct stat stat_info;

  proto_path.reserve(FN_REFLEN);
  proto_path.assign(path);
  proto_path.append(ARZ);

  if (stat(proto_path.c_str(), &stat_info))

  azio_stream proto_stream;
  if (azopen(&proto_stream, proto_path.c_str(), O_RDONLY, AZ_METHOD_BLOCK) == 0)
    return HA_ERR_CRASHED_ON_USAGE;

  proto_string= (char*)malloc(sizeof(char) * proto_stream.frm_length);
  if (proto_string == NULL)
    azclose(&proto_stream);

  azread_frm(&proto_stream, proto_string);

  if (table_proto->ParseFromArray(proto_string, proto_stream.frm_length) == false)
    error= HA_ERR_CRASHED_ON_USAGE;

  azclose(&proto_stream);

static ArchiveEngine *archive_engine= NULL;
/*
  Initialize the archive handler.
*/
static int archive_db_init(drizzled::plugin::Registry &registry)

  pthread_mutex_init(&archive_mutex, MY_MUTEX_INIT_FAST);
  archive_engine= new ArchiveEngine(engine_name);
  registry.add(archive_engine);

  /* When the engine starts up set the first version */
  global_version= 1;

static int archive_db_done(drizzled::plugin::Registry &registry)

  registry.remove(archive_engine);
  delete archive_engine;

  pthread_mutex_destroy(&archive_mutex);
ha_archive::ha_archive(StorageEngine *engine_arg, TableShare *table_arg)
  :handler(engine_arg, table_arg), delayed_insert(0), bulk_insert(0)

  /* Set our original buffer from pre-allocated memory */
  buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info);

  archive_reader_open= false;
/*
  This method reads the header of a datafile and returns whether or not it was successful.
*/
ArchiveShare::ArchiveShare():
  use_count(0), archive_write_open(false), dirty(false), crashed(false),
  mean_rec_length(0), version(0), rows_recorded(0), version_rows(0)

ArchiveShare::ArchiveShare(const char *name):
  use_count(0), archive_write_open(false), dirty(false), crashed(false),
  mean_rec_length(0), version(0), rows_recorded(0), version_rows(0)

  memset(&archive_write, 0, sizeof(azio_stream));     /* Archive file we are working with */
  table_name.append(name);
  fn_format(data_file_name, table_name.c_str(), "",
            ARZ, MY_REPLACE_EXT | MY_UNPACK_FILENAME);
  /*
    We will use this lock for rows.
  */
  pthread_mutex_init(&mutex, MY_MUTEX_INIT_FAST);

ArchiveShare::~ArchiveShare()

  thr_lock_delete(&lock);
  pthread_mutex_destroy(&mutex);
  /*
    We need to make sure we don't reset the crashed state.
    If we open a crashed file, we need to close it as crashed unless
    it has been repaired.
    Since we will close the data down after this, we go on and count
    the flush on close.
  */
  if (archive_write_open == true)
    (void)azclose(&archive_write);

bool ArchiveShare::prime(uint64_t *auto_increment)

  azio_stream archive_tmp;

  /*
    We read the meta file, but do not mark it dirty. Since we are not
    doing a write we won't mark it dirty (and we won't open it for
    anything but reading... open it for write and we will generate null
    compression writes).
  */
  if (!(azopen(&archive_tmp, data_file_name, O_RDONLY,

  *auto_increment= archive_tmp.auto_increment + 1;
  rows_recorded= (ha_rows)archive_tmp.rows;
  crashed= archive_tmp.dirty;
  if (version < global_version)
    version_rows= rows_recorded;
    version= global_version;

  azclose(&archive_tmp);
/*
  We create the shared memory space that we will use for the open table.
  No matter what, we try to get or create a share. This is so that a repair
  table operation can occur.

  See ha_example.cc for a longer description.
*/
ArchiveShare *ha_archive::get_share(const char *table_name, int *rc)

  map<const char *, ArchiveShare *>::iterator find_iter;
  pthread_mutex_lock(&archive_mutex);
  length= (uint) strlen(table_name);

  find_iter= archive_open_tables.find(table_name);

  if (find_iter != archive_open_tables.end())
    share= (*find_iter).second;

  share= new ArchiveShare(table_name);

    pthread_mutex_unlock(&archive_mutex);
    *rc= HA_ERR_OUT_OF_MEM;
  if (share->prime(&stats.auto_increment_value) == false)

    pthread_mutex_unlock(&archive_mutex);
    *rc= HA_ERR_CRASHED_ON_REPAIR;
  archive_open_tables[share->table_name.c_str()]= share;
  thr_lock_init(&share->lock);

  share->use_count++;
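/*
  get_share()/free_share() implement a mutex-protected reference count over
  the archive_open_tables map. A condensed sketch of the pattern, with
  hypothetical names (Share, open_shares, acquire/release_share) standing in
  for the real types:
*/
#include <map>
#include <string>
#include <pthread.h>

struct Share { unsigned use_count; };

static std::map<std::string, Share *> open_shares;
static pthread_mutex_t shares_mutex= PTHREAD_MUTEX_INITIALIZER;

static Share *acquire_share(const std::string &name)
{
  pthread_mutex_lock(&shares_mutex);
  Share *&slot= open_shares[name];      /* inserts NULL on first lookup */
  if (slot == NULL)
    slot= new Share();                  /* use_count starts at zero */
  slot->use_count++;
  pthread_mutex_unlock(&shares_mutex);
  return slot;
}

static void release_share(const std::string &name, Share *share)
{
  pthread_mutex_lock(&shares_mutex);
  if (--share->use_count == 0)          /* last user tears the share down */
  {
    open_shares.erase(name);
    delete share;
  }
  pthread_mutex_unlock(&shares_mutex);
}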
/*
  See ha_example.cc for a description.
*/
int ha_archive::free_share()

  pthread_mutex_lock(&archive_mutex);
  if (!--share->use_count)
    archive_open_tables.erase(share->table_name.c_str());

  pthread_mutex_unlock(&archive_mutex);
int ha_archive::init_archive_writer()

  /*
    It is expensive to open and close the data files and since you can't have
    a gzip file that can be both read and written we keep a writer open
    that is shared among all open tables.
  */
  if (!(azopen(&(share->archive_write), share->data_file_name,
               O_RDWR, AZ_METHOD_BLOCK)))

    share->crashed= true;
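/*
  The comment above reduces to two rules: open one writer per share lazily
  and reuse it, and flush it before any read so scans see buffered rows.
  A sketch using the azio calls from the surrounding code; the helper name
  flush_for_read() is hypothetical and error handling is elided.
*/
static void flush_for_read(ArchiveShare *share)
{
  if (share->archive_write_open)
    azflush(&(share->archive_write), Z_SYNC_FLUSH); /* as in ha_archive::check() */
}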
/*
  We create our data file here. The format is pretty simple.
  You can read about the format of the data file above.
  Unlike other storage engines we do not "pack" our data. Since we
  are about to do a general compression, packing would just be a waste of
  CPU time. If the table has blobs they are written after the row in the order
  of creation.
*/
int ArchiveEngine::createTableImplementation(Session *session,
                                             const char *table_name,
                                             HA_CREATE_INFO *create_info,
                                             drizzled::message::Table *proto)
  char name_buff[FN_REFLEN];
  char linkname[FN_REFLEN];
  azio_stream create_stream;            /* Archive file we are working with */
  uint64_t auto_increment_value;
  string serialized_proto;

  auto_increment_value= create_info->auto_increment_value;
  for (uint32_t key= 0; key < table_arg->sizeKeys(); key++)

  /*
    We reuse name_buff since it is available.
  */
  if (create_info->data_file_name && create_info->data_file_name[0] != '#')

    fn_format(name_buff, create_info->data_file_name, "", ARZ,
              MY_REPLACE_EXT | MY_UNPACK_FILENAME);
    fn_format(linkname, table_name, "", ARZ,
              MY_REPLACE_EXT | MY_UNPACK_FILENAME);

    fn_format(name_buff, table_name, "", ARZ,
              MY_REPLACE_EXT | MY_UNPACK_FILENAME);
  if (azopen(&create_stream, name_buff, O_CREAT|O_RDWR,
             AZ_METHOD_BLOCK) == 0)

  if (symlink(name_buff, linkname) != 0)

  proto->SerializeToString(&serialized_proto);

  if (azwrite_frm(&create_stream, serialized_proto.c_str(),
                  serialized_proto.length()))

  if (proto->options().has_comment())

    write_length= azwrite_comment(&create_stream,
                                  proto->options().comment().c_str(),
                                  proto->options().comment().length());

    if (write_length < 0)
  /*
    Yes, you need to do this, because the starting value
    for the autoincrement may not be zero.
  */
  create_stream.auto_increment= auto_increment_value ?
                                auto_increment_value - 1 : 0;

  if (azclose(&create_stream))

  deleteTable(session, table_name);

  /* Return error number, if we got one */
  return(error ? error : -1);
  /*
    We don't support decrementing auto_increment. They make the performance
    just cry.
  */
  if (temp_auto <= share->archive_write.auto_increment &&
      mkey->flags & HA_NOSAME)

    rc= HA_ERR_FOUND_DUPP_KEY;
861
if (temp_auto > share->archive_write.auto_increment)
844
void ha_archive::get_auto_increment(uint64_t offset __attribute__((unused)),
845
uint64_t increment __attribute__((unused)),
846
uint64_t nb_desired_values __attribute__((unused)),
847
uint64_t *first_value __attribute__((unused)),
848
uint64_t *nb_reserved_values __attribute__((unused)))
882
void ha_archive::get_auto_increment(uint64_t, uint64_t, uint64_t,
883
uint64_t *first_value, uint64_t *nb_reserved_values)
850
885
*nb_reserved_values= UINT64_MAX;
851
886
*first_value= share->archive_write.auto_increment + 1;
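/*
  The reservation contract implemented above, in miniature: the engine hands
  out one past the highest value it has written, and by reserving UINT64_MAX
  values it tells the caller never to ask again within the statement. The
  helper below is illustrative, not part of the handler API.
*/
static void sketch_reserve_auto_increment(uint64_t current_max,
                                          uint64_t *first_value,
                                          uint64_t *nb_reserved_values)
{
  *first_value= current_max + 1;        /* next key to hand out */
  *nb_reserved_values= UINT64_MAX;      /* the rest of the range is ours */
}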
/* Initialized at each key walk (called multiple times unlike rnd_init()) */
int ha_archive::index_init(uint32_t keynr, bool)

  active_index= keynr;
  share->archive_write_open= false;

  proto_string= (char*)malloc(sizeof(char) * archive.frm_length);
  if (proto_string == NULL)

  azread_frm(&archive, proto_string);

  /* Let's create a file to contain the new data */
  fn_format(writer_filename, share->table_name.c_str(), "", ARN,
            MY_REPLACE_EXT | MY_UNPACK_FILENAME);

  if (!(azopen(&writer, writer_filename, O_CREAT|O_RDWR, AZ_METHOD_BLOCK)))
    return(HA_ERR_CRASHED_ON_USAGE);

  azwrite_frm(&writer, proto_string, archive.frm_length);

  /*
    An extended rebuild is a lot more effort. We open up each row and re-record it.
    Any dead rows are removed (aka rows that may have been partially recorded).
    As of Archive format 3, this is the only type that is performed; before this
    version it was just done on T_EXTEND.
  */
  azclose(&writer);
  share->dirty= false;

  azclose(&archive);

  // make the file we just wrote be our data file
  rc= my_rename(writer_filename, share->data_file_name, MYF(0));

  azclose(&writer);
/*
  Below is an example of how to set up row level locking.
*/
THR_LOCK_DATA **ha_archive::store_lock(Session *session,
                                       THR_LOCK_DATA **to,
                                       enum thr_lock_type lock_type)

  delayed_insert= false;

  if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
    /*
      Here is where we get into the guts of a row level lock.
      If we are not doing a LOCK Table or DISCARD/IMPORT
      TABLESPACE, then allow multiple writers
    */
    if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
         lock_type <= TL_WRITE)
        && !session_tablespace_op(session))
      lock_type = TL_WRITE_ALLOW_WRITE;
    /*
      In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
      MySQL would use the lock TL_READ_NO_INSERT on t2, and that
      would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts
      to t2. Convert the lock to a normal read lock to allow
      concurrent inserts to t2.
    */
    if (lock_type == TL_READ_NO_INSERT)
      lock_type = TL_READ;

  lock.type= lock_type;
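/*
  The two conversions above, condensed into a sketch (hypothetical helper;
  the real code also skips TL_IGNORE and respects the existing lock state):
*/
static enum thr_lock_type archive_convert_lock(enum thr_lock_type lt,
                                               bool in_tablespace_op)
{
  /* Plain writers may run concurrently: Archive is append-only. */
  if (lt >= TL_WRITE_CONCURRENT_INSERT && lt <= TL_WRITE && !in_tablespace_op)
    return TL_WRITE_ALLOW_WRITE;

  /* INSERT ... SELECT must not block concurrent inserts into the source. */
  if (lt == TL_READ_NO_INSERT)
    return TL_READ;

  return lt;
}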
/*
  We just return state if asked.
*/
bool ha_archive::is_crashed() const

  return(share->crashed);
/*
  Simple scan of the tables to make sure everything is ok.
*/
int ha_archive::check(Session* session, HA_CHECK_OPT *)

  const char *old_proc_info;

  old_proc_info= get_session_proc_info(session);
  set_session_proc_info(session, "Checking table");
  /* Flush any waiting data */
  pthread_mutex_lock(&share->mutex);
  azflush(&(share->archive_write), Z_SYNC_FLUSH);
  pthread_mutex_unlock(&share->mutex);

  /*
    Now we will rewind the archive file so that we are positioned at the
    start of the file.
  */
  init_archive_reader();