along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation        // gcc: Class implementation
#endif

#include "drizzled/server_includes.h"
18
#include "drizzled/field.h"
19
#include "drizzled/field/blob.h"
20
#include "drizzled/field/timestamp.h"
21
#include "plugin/myisam/myisam.h"
22
#include "drizzled/table.h"
23
#include "drizzled/session.h"
24
#include <mysys/my_dir.h>
20
#include <drizzled/common_includes.h>
21
#include <storage/myisam/myisam.h>
26
23
#include "ha_archive.h"
34
static const string engine_name("ARCHIVE");

/*
  First, if you want to understand storage engines you should look at
  ha_example.cc and ha_example.h.

  This example was written as a test case for a customer who needed
  a storage engine without indexes that could compress data very well.
  So, welcome to a completely compressed storage engine. This storage
  engine only does inserts. No replace, deletes, or updates. All reads are
  complete table scans. Compression is done through a combination of packing
  and making use of the zlib library.

  We keep a file pointer open for each instance of ha_archive for each read
  but for writes we keep one open file handle just for that. We flush it
  only if we have a read occur. azip handles compressing lots of records
  at once much better than doing lots of little records between writes.
  It is possible for the file handle to be of two types (but not at
  the same time since we would want to flush).

  A "meta" file is kept alongside the data file. This file serves two purposes.
  The first purpose is to track the number of rows in the table. The second
  purpose is to determine if the table was closed properly or not. When the
  meta file is first opened it is marked as dirty. It is opened when the table
  itself is opened for writing. When the table is closed the new count for rows
  is written to the meta file and the file is marked as clean. If the meta file
  is opened and it is marked as dirty, it is assumed that a crash occurred. At
  this point an error occurs and the user is told to rebuild the file.
  A rebuild scans the rows and rewrites the meta file. If corruption is found
  in the data file then the meta file is not repaired. (A sketch of this
  dirty-flag protocol follows this comment.)

  At some point a recovery method for such a drastic case needs to be devised.

  Locks are row level, and you will get a consistent read.

  As far as table scans go, it is quite fast. I don't have
  good numbers, but locally it has outperformed both InnoDB and MyISAM. For
  InnoDB the question will be whether the table can fit into the buffer
  pool. For MyISAM it's a question of how much the file system caches the
  MyISAM file. With enough free memory MyISAM is faster. It's only when the OS
  doesn't have enough memory to cache the entire table that Archive turns out
  to be faster.

  Examples between MyISAM (packed) and Archive.
*/
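
/*
  A minimal sketch of the dirty-flag protocol described above, for
  illustration only. In the real engine the flag and row count live in the
  azio stream header; example_meta and the two helpers below are
  hypothetical and are not used anywhere in this file.
*/
#include <cstdio>
#include <stdint.h>

struct example_meta
{
  uint8_t dirty;    /* set on open-for-write, cleared on clean close */
  uint64_t rows;    /* row count, written back on clean close */
};

/* On open-for-write: a flag that is already set means the last writer
   never closed cleanly, so the caller must rebuild the table. */
static bool example_mark_dirty(FILE *meta, example_meta *m)
{
  rewind(meta);
  if (fread(m, sizeof(*m), 1, meta) != 1)
    return false;
  if (m->dirty)
    return false;               /* crash detected: rebuild required */
  m->dirty= 1;
  rewind(meta);
  return fwrite(m, sizeof(*m), 1, meta) == 1 && fflush(meta) == 0;
}

/* On clean close: persist the new row count, then clear the flag. */
static bool example_close_clean(FILE *meta, example_meta *m, uint64_t rows)
{
  m->rows= rows;
  m->dirty= 0;
  rewind(meta);
  return fwrite(m, sizeof(*m), 1, meta) == 1 && fflush(meta) == 0;
}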

/* Variables for archive share methods */
pthread_mutex_t archive_mutex= PTHREAD_MUTEX_INITIALIZER;

/*
  Open tables, keyed by table name. A std::string key compares by value;
  a bare const char * key would compare pointer identity.
*/
static map<string, ArchiveShare *> archive_open_tables;

static unsigned int global_version;

/* The file extensions */
#define ARZ ".ARZ"               // The data file
#define ARN ".ARN"               // Files used during an optimize call
#define ARM ".ARM"               // Meta file (deprecated)

/*
  unsigned char + unsigned char
*/
#define DATA_BUFFER_SIZE 2       // Size of the data used in the data file
#define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption

static bool archive_use_aio= false;

/* Size of the header stored in front of each row */
#define ARCHIVE_ROW_HEADER_SIZE 4
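
/*
  Layout note (overview only; the azio layer owns the authoritative
  format): each record is an ARCHIVE_ROW_HEADER_SIZE length header
  followed by the row data, with blob contents appended after the row.
*/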

/*
  We just implement one additional file extension.
*/
static const char *ha_archive_exts[] = {
  ARZ,
  NULL
};

class ArchiveTableNameIterator: public drizzled::plugin::TableNameIteratorImplementation
{
private:
  MY_DIR *dirp;
  uint32_t current_entry;

public:
  ArchiveTableNameIterator(const std::string &database)
    : drizzled::plugin::TableNameIteratorImplementation(database), dirp(NULL), current_entry(-1)
  {}

  ~ArchiveTableNameIterator();

  int next(std::string *name);
};

ArchiveTableNameIterator::~ArchiveTableNameIterator()
{
  if (dirp)
    my_dirend(dirp);
}

int ArchiveTableNameIterator::next(string *name)
{
  char uname[NAME_LEN + 1];
  FILEINFO *file;
  char *ext;
  uint32_t file_name_len;
  const char *wild= NULL;

  if (dirp == NULL)
  {
    bool dir= false;
    char path[FN_REFLEN];

    build_table_filename(path, sizeof(path), db.c_str(), "", false);
    dirp = my_dir(path,MYF(dir ? MY_WANT_STAT : 0));
    if (dirp == NULL)
    {
      if (my_errno == ENOENT)
        my_error(ER_BAD_DB_ERROR, MYF(ME_BELL+ME_WAITTANG), db.c_str());
      else
        my_error(ER_CANT_READ_DIR, MYF(ME_BELL+ME_WAITTANG), path, my_errno);
      return(ENOENT);
    }
    current_entry= -1;
  }

  while (true)
  {
    current_entry++;

    if (current_entry == dirp->number_off_files)
      return -1;

    file= dirp->dir_entry + current_entry;

    if (my_strcasecmp(system_charset_info, ext=strchr(file->name,'.'), ARZ) ||
        is_prefix(file->name, TMP_FILE_PREFIX))
      continue;

    file_name_len= filename_to_tablename(file->name, uname, sizeof(uname));
    uname[file_name_len]= '\0';
    if (wild && wild_compare(uname, wild, 0))
      continue;

    if (name)
      name->assign(uname);

    return 0;
  }
}

class ArchiveEngine : public drizzled::plugin::StorageEngine
{
public:
  ArchiveEngine(const string &name_arg)
    : drizzled::plugin::StorageEngine(name_arg,
                                      HTON_FILE_BASED
                                       | HTON_HAS_DATA_DICTIONARY) {}

  virtual handler *create(TableShare *table,
                          MEM_ROOT *mem_root)
  {
    return new (mem_root) ha_archive(this, table);
  }

  const char **bas_ext() const {
    return ha_archive_exts;
  }

  int createTableImplementation(Session *session, const char *table_name,
                                Table *table_arg, HA_CREATE_INFO *create_info,
                                drizzled::message::Table* proto);

  int getTableProtoImplementation(const char* path,
                                  drizzled::message::Table *table_proto);

  drizzled::plugin::TableNameIteratorImplementation* tableNameIterator(const std::string &database)
  {
    return new ArchiveTableNameIterator(database);
  }
};

int ArchiveEngine::getTableProtoImplementation(const char* path,
                                               drizzled::message::Table *table_proto)
{
  struct stat stat_info;
  int error= 0;
  string proto_path;

  proto_path.reserve(FN_REFLEN);
  proto_path.assign(path);

  proto_path.append(ARZ);

  if (stat(proto_path.c_str(),&stat_info))
    return errno;

  if (table_proto)
  {
    azio_stream proto_stream;
    char *proto_string;

    if (azopen(&proto_stream, proto_path.c_str(), O_RDONLY, AZ_METHOD_BLOCK) == 0)
      return HA_ERR_CRASHED_ON_USAGE;

    proto_string= (char*)malloc(sizeof(char) * proto_stream.frm_length);
    if (proto_string == NULL)
    {
      azclose(&proto_stream);
      return ENOMEM;
    }

    azread_frm(&proto_stream, proto_string);

    if (table_proto->ParseFromArray(proto_string, proto_stream.frm_length) == false)
      error= HA_ERR_CRASHED_ON_USAGE;

    azclose(&proto_stream);
    free(proto_string);
  }

  return error;
}
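
/*
  Note on the round trip: createTableImplementation() below serializes the
  drizzled::message::Table proto with SerializeToString() and stores it in
  the .ARZ header via azwrite_frm(); the function above recovers it with
  azread_frm() and ParseFromArray(). The table definition travels inside
  the data file itself, which is what HTON_HAS_DATA_DICTIONARY advertises.
*/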

static ArchiveEngine *archive_engine= NULL;

/*
  Initialize the archive handler.
*/
static int archive_db_init(drizzled::plugin::Registry &registry)
{
  pthread_mutex_init(&archive_mutex, MY_MUTEX_INIT_FAST);
  archive_engine= new ArchiveEngine(engine_name);
  registry.add(archive_engine);

  /* When the engine starts up set the first version */
  global_version= 1;

  return false;
}

static int archive_db_done(drizzled::plugin::Registry &registry)
{
  registry.remove(archive_engine);
  delete archive_engine;

  pthread_mutex_destroy(&archive_mutex);

  return 0;
}

ha_archive::ha_archive(drizzled::plugin::StorageEngine *engine_arg,
                       TableShare *table_arg)
  :handler(engine_arg, table_arg), delayed_insert(0), bulk_insert(0)
{
  /* Set our original buffer from pre-allocated memory */
  buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info);

  archive_reader_open= false;
}

/*
  This method reads the header of a datafile and returns whether or not it was successful.
*/
ArchiveShare::ArchiveShare():
  use_count(0), archive_write_open(false), dirty(false), crashed(false),
  mean_rec_length(0), version(0), rows_recorded(0), version_rows(0)
{}

ArchiveShare::ArchiveShare(const char *name):
  use_count(0), archive_write_open(false), dirty(false), crashed(false),
  mean_rec_length(0), version(0), rows_recorded(0), version_rows(0)
{
  memset(&archive_write, 0, sizeof(azio_stream));     /* Archive file we are working with */
  table_name.append(name);
  fn_format(data_file_name, table_name.c_str(), "",
            ARZ, MY_REPLACE_EXT | MY_UNPACK_FILENAME);
  /*
    We will use this lock for rows.
  */
  pthread_mutex_init(&mutex,MY_MUTEX_INIT_FAST);
}

ArchiveShare::~ArchiveShare()
{
  thr_lock_delete(&lock);
  pthread_mutex_destroy(&mutex);
  /*
    We need to make sure we don't reset the crashed state.
    If we open a crashed file, we need to close it as crashed unless
    it has been repaired.
    Since we will close the data down after this, we go on and count
    the flush on close.
  */
  if (archive_write_open == true)
    (void)azclose(&archive_write);
}

bool ArchiveShare::prime(uint64_t *auto_increment)
{
  azio_stream archive_tmp;

  /*
    We read the meta file, but do not mark it dirty. Since we are not
    doing a write we won't mark it dirty (and we won't open it for
    anything but reading... open it for write and we will generate null
    compression writes).
  */
  if (!(azopen(&archive_tmp, data_file_name, O_RDONLY,
               AZ_METHOD_BLOCK)))
    return false;

  *auto_increment= archive_tmp.auto_increment + 1;
  rows_recorded= (ha_rows)archive_tmp.rows;
  crashed= archive_tmp.dirty;
  if (version < global_version)
  {
    version_rows= rows_recorded;
    version= global_version;
  }
  azclose(&archive_tmp);

  return true;
}

/*
  We create the shared memory space that we will use for the open table.
  No matter what we try to get or create a share. This is so that a repair
  table operation can occur.

  See ha_example.cc for a longer description.
*/
ArchiveShare *ha_archive::get_share(const char *table_name, int *rc)
{
  ArchiveShare *share= NULL;
  map<string, ArchiveShare *>::iterator find_iter;

  pthread_mutex_lock(&archive_mutex);

  find_iter= archive_open_tables.find(table_name);

  if (find_iter != archive_open_tables.end())
    share= (*find_iter).second;

  if (share == NULL)
  {
    share= new ArchiveShare(table_name);

    if (share == NULL)
    {
      pthread_mutex_unlock(&archive_mutex);
      *rc= HA_ERR_OUT_OF_MEM;
      return(NULL);
    }

    if (share->prime(&stats.auto_increment_value) == false)
    {
      pthread_mutex_unlock(&archive_mutex);
      *rc= HA_ERR_CRASHED_ON_REPAIR;
      delete share;

      return(NULL);
    }

    archive_open_tables[share->table_name]= share;
    thr_lock_init(&share->lock);
  }
  share->use_count++;

  if (share->crashed)
    *rc= HA_ERR_CRASHED_ON_USAGE;
  pthread_mutex_unlock(&archive_mutex);

  return(share);
}

/*
  See ha_example.cc for a description.
*/
int ha_archive::free_share()
{
  int rc= 0;

  pthread_mutex_lock(&archive_mutex);
  if (!--share->use_count)
  {
    archive_open_tables.erase(share->table_name);
    delete share;
  }
  pthread_mutex_unlock(&archive_mutex);

  return(rc);
}

int ha_archive::init_archive_writer()
{
  /*
    It is expensive to open and close the data files and since you can't have
    a gzip file that can be both read and written we keep a writer open
    that is shared among all open tables.
  */
  if (!(azopen(&(share->archive_write), share->data_file_name,
               O_RDWR, AZ_METHOD_BLOCK)))
  {
    share->crashed= true;
    return(1);
  }
  share->archive_write_open= true;

  return(0);
}

/*
  We create our data file here. The format is pretty simple.
  You can read about the format of the data file above.
  Unlike other storage engines we do not "pack" our data. Since we
  are about to do a general compression, packing would just be a waste of
  CPU time. If the table has blobs they are written after the row in the order
  of creation. (A small sketch of this write order follows.)
*/
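
/*
  A minimal sketch of the write order just described: the row image goes
  first, then each blob's bytes in creation order, so a scan can read the
  fixed part and then consume blob data sequentially. Illustration only;
  the engine's real writer goes through azio, and example_append_row is
  hypothetical.
*/
#include <vector>

static void example_append_row(std::vector<unsigned char> &out,
                               const unsigned char *row, size_t row_len,
                               const unsigned char *blob, size_t blob_len)
{
  out.insert(out.end(), row, row + row_len);    /* packed row image */
  out.insert(out.end(), blob, blob + blob_len); /* blob data follows */
}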

int ArchiveEngine::createTableImplementation(Session *session,
                                             const char *table_name,
                                             Table *table_arg,
                                             HA_CREATE_INFO *create_info,
                                             drizzled::message::Table *proto)
{
  char name_buff[FN_REFLEN];
  int error= 0;
  azio_stream create_stream;            /* Archive file we are working with */
  uint64_t auto_increment_value;
  string serialized_proto;

  auto_increment_value= create_info->auto_increment_value;

  for (uint32_t key= 0; key < table_arg->sizeKeys(); key++)
  {
    KEY *pos= table_arg->key_info+key;
    KEY_PART_INFO *key_part=     pos->key_part;
    KEY_PART_INFO *key_part_end= key_part + pos->key_parts;

    for (; key_part != key_part_end; key_part++)
    {
      Field *field= key_part->field;

      if (!(field->flags & AUTO_INCREMENT_FLAG))
      {
        error= -1;
        goto error;
      }
    }
  }

  /*
    We reuse name_buff since it is available.
  */
  fn_format(name_buff, table_name, "", ARZ,
            MY_REPLACE_EXT | MY_UNPACK_FILENAME);

  if (azopen(&create_stream, name_buff, O_CREAT|O_RDWR,
             AZ_METHOD_BLOCK) == 0)
  {
    error= errno;
    goto error2;
  }

  proto->SerializeToString(&serialized_proto);

  if (azwrite_frm(&create_stream, serialized_proto.c_str(),
                  serialized_proto.length()))
    goto error2;

  if (proto->options().has_comment())
  {
    int write_length;

    write_length= azwrite_comment(&create_stream,
                                  proto->options().comment().c_str(),
                                  proto->options().comment().length());

    if (write_length < 0)
    {
      error= errno;
      goto error2;
    }
  }

  /*
    Yes you need to do this, because the starting value
    for the autoincrement may not be zero.
  */
  create_stream.auto_increment= auto_increment_value ?
                                  auto_increment_value - 1 : 0;

  if (azclose(&create_stream))
  {
    error= errno;
    goto error2;
  }

  return(0);

error2:
  deleteTable(session, table_name);
error:
  /* Return error number, if we got one */
  return(error ? error : -1);
}
    /*
      We don't support decrementing auto_increment. They make the performance
      too low.
    */
    if (temp_auto <= share->archive_write.auto_increment &&
        mkey->flags & HA_NOSAME)
    {
      rc= HA_ERR_FOUND_DUPP_KEY;
      goto error;
    }

    /*
      Bad news, this will cause a search for the unique value which is very
      expensive since we will have to do a table scan which will lock up
      all other writers during this period. This could perhaps be optimized
      in the future.

      First we create a buffer that we can use for reading rows, and can pass
      to get_row().
    */
    if (!(read_buf= (unsigned char*) my_malloc(table->s->reclength, MYF(MY_WME))))
    {
      rc= HA_ERR_OUT_OF_MEM;
      goto error;
    }
    /*
      All of the buffer must be written out or we won't see all of the
      data.
    */
    azflush(&(share->archive_write), Z_SYNC_FLUSH);
    /*
      Set the position of the local read thread to the beginning position.
    */
    if (read_data_header(&archive))
    {
      rc= HA_ERR_CRASHED_ON_USAGE;
      goto error;
    }

    Field *mfield= table->next_number_field;

    while (!(get_row(&archive, read_buf)))
    {
      if (!memcmp(read_buf + mfield->offset(record),
                  table->next_number_field->ptr,
                  mfield->max_display_length()))
      {
        rc= HA_ERR_FOUND_DUPP_KEY;
        goto error;
      }
    }

    if (temp_auto > share->archive_write.auto_increment)
      stats.auto_increment_value=
        share->archive_write.auto_increment= temp_auto;

void ha_archive::get_auto_increment(uint64_t, uint64_t, uint64_t,
                                    uint64_t *first_value,
                                    uint64_t *nb_reserved_values)
{
  *nb_reserved_values= UINT64_MAX;
  *first_value= share->archive_write.auto_increment + 1;
}

/* Initialized at each key walk (called multiple times unlike rnd_init()) */
int ha_archive::index_init(uint32_t keynr, bool)
{
  active_index= keynr;
  return(0);
}
  share->archive_write_open= false;

  proto_string= (char*)malloc(sizeof(char) * archive.frm_length);
  if (proto_string == NULL)
  {
    return HA_ERR_OUT_OF_MEM;
  }
  azread_frm(&archive, proto_string);

  /* Let's create a file to contain the new data */
  fn_format(writer_filename, share->table_name.c_str(), "", ARN,
            MY_REPLACE_EXT | MY_UNPACK_FILENAME);

  if (!(azopen(&writer, writer_filename, O_CREAT|O_RDWR, AZ_METHOD_BLOCK)))
  {
    free(proto_string);
    return(HA_ERR_CRASHED_ON_USAGE);
  }

  azwrite_frm(&writer, proto_string, archive.frm_length);

  /*
    An extended rebuild is a lot more effort. We open up each row and re-record it.
    Any dead rows are removed (aka rows that may have been partially recorded).

    As of Archive format 3, this is the only type that is performed; before this
    version it was just done on T_EXTEND.
  */

  azclose(&writer);
  share->dirty= false;

  azclose(&archive);

  // make the file we just wrote be our data file
  rc = my_rename(writer_filename,share->data_file_name,MYF(0));

  free(proto_string);
  return(rc);

error:
  azclose(&writer);
  free(proto_string);

  return(rc);
}

/*
  Below is an example of how to setup row level locking.
*/
THR_LOCK_DATA **ha_archive::store_lock(Session *session,
                                       THR_LOCK_DATA **to,
                                       enum thr_lock_type lock_type)
{
  delayed_insert= false;

  if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
  {
    /*
      Here is where we get into the guts of a row level lock.
      If we are not doing a LOCK Table or DISCARD/IMPORT
      TABLESPACE, then allow multiple writers
    */

    if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
         lock_type <= TL_WRITE)
        && !session_tablespace_op(session))
      lock_type = TL_WRITE_ALLOW_WRITE;

    /*
      In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
      MySQL would use the lock TL_READ_NO_INSERT on t2, and that
      would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts
      to t2. Convert the lock to a normal read lock to allow
      concurrent inserts to t2.
    */

    if (lock_type == TL_READ_NO_INSERT)
      lock_type = TL_READ;

    lock.type=lock_type;
  }

  *to++= &lock;

  return to;
}

/*
  We just return state if asked.
*/
bool ha_archive::is_crashed() const
{
  return(share->crashed);
}

/*
  Simple scan of the tables to make sure everything is ok.
*/
int ha_archive::check(Session* session, HA_CHECK_OPT *)
{
  const char *old_proc_info;

  old_proc_info= get_session_proc_info(session);
  set_session_proc_info(session, "Checking table");
  /* Flush any waiting data */
  pthread_mutex_lock(&share->mutex);
  azflush(&(share->archive_write), Z_SYNC_FLUSH);
  pthread_mutex_unlock(&share->mutex);

  /*
    Now we will rewind the archive file so that we are positioned at the
    start of the file.
  */
  init_archive_reader();

/*
  Check and repair the table if needed.
*/
bool ha_archive::check_and_repair(Session *session)
{
  HA_CHECK_OPT check_opt;

  check_opt.init();

  return(repair(session, &check_opt));
}

archive_record_buffer *ha_archive::create_record_buffer(unsigned int length)
{
  archive_record_buffer *r;
  if (!(r= (archive_record_buffer*) malloc(sizeof(archive_record_buffer))))
  {
    return(NULL); /* purecov: inspected */
  }
  r->length= (int)length;

  if (!(r->buffer= (unsigned char*) malloc(r->length)))
  {
    free((char*) r);
    return(NULL); /* purecov: inspected */
  }

  return(r);
}

void ha_archive::destroy_record_buffer(archive_record_buffer *r)
{
  free((char*) r->buffer);
  free((char*) r);
}