   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation        // gcc: Class implementation
#endif

#include "mysql_priv.h"
#include <storage/myisam/myisam.h>

#include "ha_archive.h"
#include <mysql/plugin.h>
/*
  First, if you want to understand storage engines you should look at
  ha_example.cc and ha_example.h.
*/

/*
  Used for hash table that tracks open tables.
*/
static uchar* archive_get_key(ARCHIVE_SHARE *share, size_t *length,
                              my_bool not_used __attribute__((unused)))
{
  *length= share->table_name_length;
  return (uchar*) share->table_name;
}
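
/*
  A minimal usage sketch (illustrative only, mirroring the hash_init() call
  in archive_db_init() below): with archive_get_key registered as the
  hash_get_key callback, a share is looked up by table name like this:

    ARCHIVE_SHARE *s=
      (ARCHIVE_SHARE*) hash_search(&archive_open_tables,
                                   (uchar*) table_name,
                                   (uint) strlen(table_name));
*/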

int archive_db_init(void *p)
{
  DBUG_ENTER("archive_db_init");
  handlerton *archive_hton;

  archive_hton= (handlerton *)p;
  archive_hton->state= SHOW_OPTION_YES;
  archive_hton->db_type= DB_TYPE_ARCHIVE_DB;
  archive_hton->create= archive_create_handler;
  archive_hton->flags= HTON_NO_FLAGS;
  archive_hton->discover= archive_discover;

  if (hash_init(&archive_open_tables, system_charset_info, 32, 0, 0,
                (hash_get_key) archive_get_key, 0, 0))
    VOID(pthread_mutex_destroy(&archive_mutex));

  /* The size of the offset value we will use for position() */
  ref_length= sizeof(my_off_t);
  archive_reader_open= FALSE;
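
/*
  Illustrative sketch: because ref_length is sizeof(my_off_t), a row
  "position" is simply a file offset packed into the ref buffer, so
  position()/rnd_pos() round-trip it with the my_store_ptr()/my_get_ptr()
  pair (my_get_ptr is used exactly this way in rnd_pos() below):

    my_store_ptr(ref, ref_length, current_position);
    current_position= (my_off_t)my_get_ptr(pos, ref_length);
*/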

int archive_discover(handlerton *hton __attribute__((__unused__)),
                     THD* thd __attribute__((__unused__)),
                     const char *db,
                     const char *name,
                     uchar **frmblob,
                     size_t *frmlen)
{
  DBUG_ENTER("archive_discover");
  DBUG_PRINT("archive_discover", ("db: %s, name: %s", db, name));
  azio_stream frm_stream;
  char az_file[FN_REFLEN];

  if (stat(az_file, &file_stat))
    goto err;

  if (!(azopen(&frm_stream, az_file, O_RDONLY|O_BINARY, AZ_METHOD_BLOCK)))
  {
    if (errno == EROFS || errno == EACCES)
      DBUG_RETURN(my_errno= errno);
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
  }

  if (frm_stream.frm_length == 0)

ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, int *rc)
{
  DBUG_ENTER("ha_archive::get_share");

  pthread_mutex_lock(&archive_mutex);
  length=(uint) strlen(table_name);

  if (!(share=(ARCHIVE_SHARE*) hash_search(&archive_open_tables,
                                           (uchar*) table_name,
                                           length)))
  {
    if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
                          &share, sizeof(*share),
                          &tmp_name, length+1,
                          NullS))
    {
      pthread_mutex_unlock(&archive_mutex);
      *rc= HA_ERR_OUT_OF_MEM;
      DBUG_RETURN(NULL);
    }

    share->use_count= 0;
    share->table_name_length= length;
    share->table_name= tmp_name;
    share->crashed= FALSE;
    share->archive_write_open= FALSE;
    fn_format(share->data_file_name, table_name, "",
              ARZ, MY_REPLACE_EXT | MY_UNPACK_FILENAME);
    strmov(share->table_name, table_name);
    DBUG_PRINT("ha_archive", ("Data File %s",
                              share->data_file_name));
    /*
      We will use this lock for rows.
    */
    VOID(pthread_mutex_init(&share->mutex, MY_MUTEX_INIT_FAST));

    /*
      We read the meta file, but do not mark it dirty. Since we are not
      doing a write we won't mark it dirty (and we won't open it for
      anything but reading... open it for write and we will generate null
      compression writes).
    */
    if (!(azopen(&archive_tmp, share->data_file_name, O_RDONLY|O_BINARY,
                 AZ_METHOD_BLOCK)))
    {
      VOID(pthread_mutex_destroy(&share->mutex));
      pthread_mutex_unlock(&archive_mutex);
      *rc= HA_ERR_CRASHED_ON_REPAIR;
      DBUG_RETURN(NULL);
    }

    stats.auto_increment_value= archive_tmp.auto_increment + 1;
    share->rows_recorded= (ha_rows)archive_tmp.rows;

    azclose(&archive_tmp);

    VOID(my_hash_insert(&archive_open_tables, (uchar*) share));
    thr_lock_init(&share->lock);
  }
  share->use_count++;
  DBUG_PRINT("ha_archive", ("archive table %.*s has %d open handles now",
                            share->table_name_length, share->table_name,
                            share->use_count));
  if (share->crashed)
    *rc= HA_ERR_CRASHED_ON_USAGE;
  pthread_mutex_unlock(&archive_mutex);

int ha_archive::free_share()
{
  int rc= 0;
  DBUG_ENTER("ha_archive::free_share");
  DBUG_PRINT("ha_archive",
             ("archive table %.*s has %d open handles on entrance",
              share->table_name_length, share->table_name,
              share->use_count));

  pthread_mutex_lock(&archive_mutex);
  if (!--share->use_count)
  {
    hash_delete(&archive_open_tables, (uchar*) share);
    thr_lock_delete(&share->lock);
    VOID(pthread_mutex_destroy(&share->mutex));
    /*
      We need to make sure we don't reset the crashed state.
      If we open a crashed file, we need to close it as crashed unless
      it has been repaired.
      Since we will close the data down after this, we go on and count
      the flush on close;
    */
    if (share->archive_write_open == TRUE)
    {
      if (azclose(&(share->archive_write)))
        rc= 1;
    }
    my_free((uchar*) share, MYF(0));
  }
  pthread_mutex_unlock(&archive_mutex);
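
/*
  Illustrative sketch of the share life cycle: each handler pairs
  get_share() with free_share(), so the ARCHIVE_SHARE (mutex, THR_LOCK and
  the single shared azio writer) lives exactly as long as at least one
  handle is open:

    share= get_share(name, &rc);   // use_count++ (creates share on first open)
    ...
    free_share();                  // --use_count; tears down at zero
*/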

int ha_archive::init_archive_writer()
{
  DBUG_ENTER("ha_archive::init_archive_writer");
  /*
    It is expensive to open and close the data files and since you can't have
    a gzip file that can be both read and written we keep a writer open
    that is shared among all open tables.
  */
  if (!(azopen(&(share->archive_write), share->data_file_name,
               O_RDWR|O_BINARY, AZ_METHOD_BLOCK)))
  {
    DBUG_PRINT("ha_archive", ("Could not open archive write file"));
    share->crashed= TRUE;
    DBUG_RETURN(1);
  }
  share->archive_write_open= TRUE;

int ha_archive::init_archive_reader()
{
  DBUG_ENTER("ha_archive::init_archive_reader");
  /*
    It is expensive to open and close the data files and since you can't have
    a gzip file that can be both read and written we keep a writer open
    that is shared among all open tables.
  */
  if (archive_reader_open == FALSE)
  {
    az_method method;

    switch (archive_use_aio)
    {
    case FALSE:
      method= AZ_METHOD_BLOCK;
      break;
    case TRUE:
      method= AZ_METHOD_AIO;
      break;
    default:
      method= AZ_METHOD_BLOCK;
    }
    if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY,
                 method)))
    {
      DBUG_PRINT("ha_archive", ("Could not open archive read file"));
      share->crashed= TRUE;
      DBUG_RETURN(1);
    }
    archive_reader_open= TRUE;
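
/*
  Illustrative note: archive_use_aio is the plugin system variable declared
  via MYSQL_SYSVAR_BOOL(aio, ...) at the bottom of this file, so the read
  method can be flipped at runtime without recompiling (the SQL-visible name
  is assumed to follow the usual <plugin>_<variable> convention):

    SET GLOBAL archive_aio= OFF;   -- subsequent readers use AZ_METHOD_BLOCK
*/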

/*
  We open the file we will read from.
*/
int ha_archive::open(const char *name,
                     int mode __attribute__((__unused__)),
                     uint open_options)
{
  DBUG_ENTER("ha_archive::open");
  DBUG_PRINT("ha_archive", ("archive table was opened for crash: %s",
                      (open_options & HA_OPEN_FOR_REPAIR) ? "yes" : "no"));
  share= get_share(name, &rc);

  if (rc == HA_ERR_CRASHED_ON_USAGE && !(open_options & HA_OPEN_FOR_REPAIR))
  {
    /* purecov: begin inspected */
    free_share();
    DBUG_RETURN(rc);
    /* purecov: end */
  }
  else if (rc == HA_ERR_OUT_OF_MEM)
  {
    DBUG_RETURN(rc);
  }

  record_buffer= create_record_buffer(table->s->reclength +
                                      ARCHIVE_ROW_HEADER_SIZE);

  if (!record_buffer)
  {
    free_share();
    DBUG_RETURN(HA_ERR_OUT_OF_MEM);
  }

  thr_lock_data_init(&share->lock, &lock, NULL);

  DBUG_PRINT("ha_archive", ("archive table was crashed %s",
                      rc == HA_ERR_CRASHED_ON_USAGE ? "yes" : "no"));
  if (rc == HA_ERR_CRASHED_ON_USAGE && open_options & HA_OPEN_FOR_REPAIR)

int ha_archive::create(const char *name, TABLE *table_arg,
                       HA_CREATE_INFO *create_info)
{
  azio_stream create_stream;            /* Archive file we are working with */
  File frm_file;                        /* File handler for readers */
  struct stat file_stat;
  uchar *frm_ptr;
  DBUG_ENTER("ha_archive::create");

  stats.auto_increment_value= create_info->auto_increment_value;

  for (uint key= 0; key < table_arg->s->keys; key++)
  {
    KEY *pos= table_arg->key_info+key;
    KEY_PART_INFO *key_part= pos->key_part;

  if (create_info->data_file_name && create_info->data_file_name[0] != '#')
  {
    DBUG_PRINT("ha_archive", ("archive will create stream file %s",
                              create_info->data_file_name));

    fn_format(name_buff, create_info->data_file_name, "", ARZ,
              MY_REPLACE_EXT | MY_UNPACK_FILENAME);
    fn_format(linkname, name, "", ARZ,
              MY_REPLACE_EXT | MY_UNPACK_FILENAME);

  if (!fstat(frm_file, &file_stat))
  {
    frm_ptr= (uchar *)my_malloc(sizeof(uchar) * file_stat.st_size, MYF(0));

    my_read(frm_file, frm_ptr, file_stat.st_size, MYF(0));
    azwrite_frm(&create_stream, (char *)frm_ptr, file_stat.st_size);
    my_free((uchar*)frm_ptr, MYF(0));
  }
  my_close(frm_file, MYF(0));

  DBUG_PRINT("ha_archive", ("Creating File %s", name_buff));
  DBUG_PRINT("ha_archive", ("Creating Link %s", linkname));

  delete_table(name);

  /* Return error number, if we got one */
  DBUG_RETURN(error ? error : -1);
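
/*
  Illustrative sketch: the .frm image written into the ARZ stream here with
  azwrite_frm() is what archive_discover() reads back (frm_stream.frm_length
  above), which is how the server can rediscover an archive table from
  nothing but its data file:

    ha_archive::create()  -> azwrite_frm(&create_stream, frm, frm_len);
    archive_discover()    -> azread_frm(&frm_stream, blob);  // frm_length bytes
*/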

/*
  This is where the actual row is written out.
*/
int ha_archive::real_write_row(uchar *buf, azio_stream *writer)
{
  my_off_t written;
  unsigned int r_pack_length;
  DBUG_ENTER("ha_archive::real_write_row");

  /* We pack the row for writing */
  r_pack_length= pack_row(buf);

/*
  Calculate max length needed for row. This includes
  the bytes required for the length in the header.
*/
uint32 ha_archive::max_row_length(const uchar *buf __attribute__((__unused__)))
{
  uint32 length= (uint32)(table->s->reclength + table->s->fields*2);
  length+= ARCHIVE_ROW_HEADER_SIZE;

  uint *ptr, *end;
  for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ;
       ptr != end ;
       ptr++)
    length+= 2 + ((Field_blob*)table->field[*ptr])->get_length();

  return length;
}

unsigned int ha_archive::pack_row(uchar *record)
{
  DBUG_ENTER("ha_archive::pack_row");

  if (fix_rec_buff(max_row_length(record)))
    DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* purecov: inspected */

  /* Copy null bits */
  memcpy(record_buffer->buffer, record, table->s->null_bytes);

  for (Field **field=table->field ; *field ; field++)
  {
    if (!((*field)->is_null()))
      ptr= (*field)->pack(ptr, record + (*field)->offset(record));
  }

  DBUG_PRINT("ha_archive",("Pack row length %u", (unsigned int)
                           (ptr - record_buffer->buffer -
                            ARCHIVE_ROW_HEADER_SIZE)));

  DBUG_RETURN((unsigned int) (ptr - record_buffer->buffer));
}
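
/*
  A rough sketch of the packed format produced above (exact header handling
  lives in the azio layer): a fixed null-bit block followed by the packed,
  non-null fields only:

    [null bits: table->s->null_bytes][Field::pack() output per non-null field]

  unpack_row() below reverses this: copy the null bits back, then call
  Field::unpack() for each non-null field in declaration order.
*/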

/*
  The only saving from implementing start_bulk_insert() is that we could
  skip setting dirty to true each time.
*/
int ha_archive::write_row(uchar *buf)
{
  int rc;
  uchar *read_buf= NULL;
  uint64_t temp_auto;
  uchar *record= table->record[0];
  DBUG_ENTER("ha_archive::write_row");

  if (share->crashed)
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  ha_statistic_increment(&SSV::ha_write_count);
  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
    table->timestamp_field->set_time();
  pthread_mutex_lock(&share->mutex);

  if (share->archive_write_open == FALSE)
    if (init_archive_writer())
      DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  if (table->next_number_field && record == table->record[0])

  pthread_mutex_unlock(&share->mutex);
  if (read_buf)
    my_free((uchar*) read_buf, MYF(0));

void ha_archive::get_auto_increment(uint64_t offset __attribute__((__unused__)),
                                    uint64_t increment __attribute__((__unused__)),
                                    uint64_t nb_desired_values __attribute__((__unused__)),
                                    uint64_t *first_value __attribute__((__unused__)),
                                    uint64_t *nb_reserved_values __attribute__((__unused__)))
{
  *nb_reserved_values= ULONGLONG_MAX;
  *first_value= share->archive_write.auto_increment + 1;
}
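
/*
  Illustrative note: reserving ULONGLONG_MAX values tells the upper layer it
  never needs to call get_auto_increment() again within the statement; the
  next value simply trails the shared writer's high-water mark. E.g.:

    CREATE TABLE t1 (id INT AUTO_INCREMENT, KEY (id)) ENGINE=ARCHIVE;
    INSERT INTO t1 VALUES (NULL);  -- assigned archive_write.auto_increment + 1
*/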

/* Initialized at each key walk (called multiple times unlike rnd_init()) */
int ha_archive::index_init(uint keynr, bool sorted __attribute__((__unused__)))
{
  DBUG_ENTER("ha_archive::index_init");
  active_index= keynr;
  DBUG_RETURN(0);
}

/*
  No indexes, so if we get a request for an index search since we tell
  the optimizer that we have unique indexes, we scan
*/
int ha_archive::index_read(uchar *buf, const uchar *key,
                           uint key_len, enum ha_rkey_function find_flag)
{
  int rc;
  DBUG_ENTER("ha_archive::index_read");
  rc= index_read_idx(buf, active_index, key, key_len, find_flag);
  DBUG_RETURN(rc);
}

int ha_archive::index_read_idx(uchar *buf, uint index, const uchar *key,
                               uint key_len,
                               enum ha_rkey_function find_flag __attribute__((__unused__)))
{

  DBUG_RETURN(rc ? rc : HA_ERR_END_OF_FILE);
}

int ha_archive::index_next(uchar *buf)
{
  int rc;
  DBUG_ENTER("ha_archive::index_next");

  while (!(get_row(&archive, buf)))
  {
    if (!memcmp(current_key, buf+current_k_offset, current_key_len))
    {
      rc= 0;
      break;
    }
  }

int ha_archive::rnd_init(bool scan)
{
  DBUG_ENTER("ha_archive::rnd_init");

  if (share->crashed)
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  init_archive_reader();

  /* We rewind the file so that we can read from the beginning if scan */
  if (scan)
  {
    DBUG_PRINT("info", ("archive will retrieve %llu rows",
                        (uint64_t) scan_rows));

    if (read_data_header(&archive))
      DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
  }

  DBUG_RETURN(0);
}

/*
  This is the method that is used to read a row. It assumes that the row is
  positioned where you want it.
*/
int ha_archive::get_row(azio_stream *file_to_read, uchar *buf)
{
  int rc;
  DBUG_ENTER("ha_archive::get_row");
  DBUG_PRINT("ha_archive", ("Picking version for get_row() %d -> %d",
                            (uchar)file_to_read->version,
                            ARCHIVE_VERSION));
  if (file_to_read->version == ARCHIVE_VERSION)
    rc= get_row_version3(file_to_read, buf);
  else
    rc= -1;

  DBUG_PRINT("ha_archive", ("Return %d\n", rc));

  DBUG_RETURN(rc);
}

/* Reallocate buffer if needed */
bool ha_archive::fix_rec_buff(unsigned int length)
{
  DBUG_ENTER("ha_archive::fix_rec_buff");
  DBUG_PRINT("ha_archive", ("Fixing %u for %u",
                            length, record_buffer->length));
  DBUG_ASSERT(record_buffer->buffer);

  if (length > record_buffer->length)
  {
    uchar *newptr;
    if (!(newptr=(uchar*) my_realloc((uchar*) record_buffer->buffer,
                                     length,
                                     MYF(MY_ALLOW_ZERO_PTR))))
      DBUG_RETURN(1);
    record_buffer->buffer= newptr;
    record_buffer->length= length;
  }

  DBUG_ASSERT(length <= record_buffer->length);

  DBUG_RETURN(0);
}

int ha_archive::unpack_row(azio_stream *file_to_read, uchar *record)
{
  DBUG_ENTER("ha_archive::unpack_row");

  unsigned int read;
  int error;
  const uchar *ptr;

  read= azread_row(file_to_read, &error);
  ptr= (const uchar *)file_to_read->row_ptr;

  if (error || read == 0)
  {
    DBUG_RETURN(-1);
  }

  /* Copy null bits */
  memcpy(record, ptr, table->s->null_bytes);
  ptr+= table->s->null_bytes;
  for (Field **field=table->field ; *field ; field++)
  {
    if (!((*field)->is_null()))
    {
      ptr= (*field)->unpack(record + (*field)->offset(table->record[0]), ptr);
    }
  }
  DBUG_RETURN(0);
}

int ha_archive::get_row_version3(azio_stream *file_to_read, uchar *buf)
{
  DBUG_ENTER("ha_archive::get_row_version3");

  int returnable= unpack_row(file_to_read, buf);

  DBUG_RETURN(returnable);
}

/*
  Called during ORDER BY. Its position is either from being called sequentially
  or by having had ha_archive::rnd_pos() called before it is called.
*/
int ha_archive::rnd_next(uchar *buf)
{
  int rc;
  DBUG_ENTER("ha_archive::rnd_next");

  if (share->crashed)
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  if (!scan_rows)
    DBUG_RETURN(HA_ERR_END_OF_FILE);
  scan_rows--;

  ha_statistic_increment(&SSV::ha_read_rnd_next_count);

/*
  This is called after a sequence of rnd_next() calls to reposition the
  stream on a correctly ordered row.
*/
int ha_archive::rnd_pos(uchar * buf, uchar *pos)
{
  DBUG_ENTER("ha_archive::rnd_pos");
  ha_statistic_increment(&SSV::ha_read_rnd_next_count);
  current_position= (my_off_t)my_get_ptr(pos, ref_length);
  if (azseek(&archive, (size_t)current_position, SEEK_SET) == (size_t)(-1L))
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
  DBUG_RETURN(get_row(&archive, buf));
}

int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt)
{
  DBUG_ENTER("ha_archive::repair");
  check_opt->flags= T_EXTEND;
  int rc= optimize(thd, check_opt);

  if (rc)
    DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR);

  share->crashed= FALSE;
  DBUG_RETURN(0);
}

/*
  The table can become fragmented if data was inserted, read, and then
  inserted again. What we do is open up the file and recompress it completely.
*/
int ha_archive::optimize(THD* thd __attribute__((__unused__)),
                         HA_CHECK_OPT* check_opt __attribute__((__unused__)))
{
  DBUG_ENTER("ha_archive::optimize");
  int rc= 0;
  azio_stream writer;
  char writer_filename[FN_REFLEN];

  if (share->archive_write_open)
  {
    azclose(&(share->archive_write));
    share->archive_write_open= FALSE;
  }

  /* Let's create a file to contain the new data */
  fn_format(writer_filename, share->table_name, "", ARN,
            MY_REPLACE_EXT | MY_UNPACK_FILENAME);

  if (!(azopen(&writer, writer_filename, O_CREAT|O_RDWR|O_BINARY, AZ_METHOD_BLOCK)))
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);

  /*
    An extended rebuild is a lot more effort. We open up each row and re-record it.
  */

      stats.auto_increment_value=
        (share->archive_write.auto_increment= auto_value) + 1;
  dbug_tmp_restore_column_map(table->read_set, org_bitmap);

  share->rows_recorded= (ha_rows)writer.rows;

  DBUG_PRINT("ha_archive", ("recovered %llu archive rows",
                            (uint64_t)share->rows_recorded));

  if (rc && rc != HA_ERR_END_OF_FILE)
    goto error;
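
/*
  Illustrative flow of the rebuild above (the ARN file is the scratch copy
  that replaces the ARZ file once every row has been re-recorded):

    t1.ARZ  --get_row()/real_write_row()-->  t1.ARN  --rename-->  t1.ARZ

  On success the share's writer is reopened lazily by the next write_row(),
  since archive_write_open was set to FALSE before the rebuild started.
*/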

THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
                                       THR_LOCK_DATA **to,
                                       enum thr_lock_type lock_type)
{
  if (lock_type == TL_WRITE_DELAYED)
    delayed_insert= TRUE;
  else
    delayed_insert= FALSE;

  if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
  {
    /*
      Here is where we get into the guts of a row level lock.
      If TL_UNLOCK is set
      If we are not doing a LOCK TABLE or DISCARD/IMPORT
      TABLESPACE, then allow multiple writers
    */

  if (!(my_readlink(share->real_path, share->data_file_name, MYF(0))))
    create_info->data_file_name= share->real_path;

/*
  Hints for optimizer, see ha_tina for more information
*/
int ha_archive::info(uint flag)
{
  DBUG_ENTER("ha_archive::info");
  /*
    If dirty, we lock, and then reset/flush the data.
    I found that just calling azflush() doesn't always work.
  */
  pthread_mutex_lock(&share->mutex);
  if (share->dirty == TRUE)
  {
    DBUG_PRINT("ha_archive", ("archive flushing out rows for scan"));
    azflush(&(share->archive_write), Z_SYNC_FLUSH);
    share->rows_recorded= share->archive_write.rows;
    share->dirty= FALSE;
  }
  if (share->version < global_version)
  {
    share->version_rows= share->rows_recorded;
    share->version= global_version;
  }

  scan_rows= stats.records;
  stats.deleted= 0;

  DBUG_PRINT("ha_archive", ("Stats rows is %d\n", (int)stats.records));
  /* Costs quite a bit more to get all information */
  if (flag & HA_STATUS_TIME)
  {
    struct stat file_stat;  // Stat information for the data file

    VOID(stat(share->data_file_name, &file_stat));

    stats.mean_rec_length= table->s->reclength + buffer.alloced_length();
    stats.data_file_length= file_stat.st_size;
    stats.create_time= file_stat.st_ctime;
    stats.update_time= file_stat.st_mtime;
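
/*
  Illustrative note: these stats fields are what SHOW TABLE STATUS reports
  for an archive table, e.g. Data_length comes from stats.data_file_length,
  Avg_row_length from stats.mean_rec_length and Update_time from
  stats.update_time:

    SHOW TABLE STATUS LIKE 't1'\G
*/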

int ha_archive::check(THD* thd,
                      HA_CHECK_OPT* check_opt __attribute__((__unused__)))
{
  int rc= 0;
  const char *old_proc_info;
  DBUG_ENTER("ha_archive::check");

  old_proc_info= thd_proc_info(thd, "Checking table");
  /* Flush any waiting data */
  pthread_mutex_lock(&share->mutex);
  azflush(&(share->archive_write), Z_SYNC_FLUSH);

  thd_proc_info(thd, old_proc_info);

  if ((rc && rc != HA_ERR_END_OF_FILE))
  {
    share->crashed= FALSE;
    DBUG_RETURN(HA_ADMIN_CORRUPT);
  }

  DBUG_RETURN(HA_ADMIN_OK);
}

/*
  Check and repair the table if needed.
*/
bool ha_archive::check_and_repair(THD *thd)
{
  HA_CHECK_OPT check_opt;
  DBUG_ENTER("ha_archive::check_and_repair");

  check_opt.init();

  DBUG_RETURN(repair(thd, &check_opt));
}

archive_record_buffer *ha_archive::create_record_buffer(unsigned int length)
{
  DBUG_ENTER("ha_archive::create_record_buffer");
  archive_record_buffer *r;
  if (!(r= (archive_record_buffer*) my_malloc(sizeof(archive_record_buffer),
                                              MYF(MY_WME))))
  {
    DBUG_RETURN(NULL); /* purecov: inspected */
  }
  r->length= (int)length;

  if (!(r->buffer= (uchar*) my_malloc(r->length,
                                      MYF(MY_WME))))
  {
    my_free((char*) r, MYF(MY_ALLOW_ZERO_PTR));
    DBUG_RETURN(NULL); /* purecov: inspected */
  }

  DBUG_RETURN(r);
}

void ha_archive::destroy_record_buffer(archive_record_buffer *r)
{
  DBUG_ENTER("ha_archive::destroy_record_buffer");
  my_free((char*) r->buffer, MYF(MY_ALLOW_ZERO_PTR));
  my_free((char*) r, MYF(MY_ALLOW_ZERO_PTR));
  DBUG_VOID_RETURN;
}

static MYSQL_SYSVAR_BOOL(aio, archive_use_aio,
  PLUGIN_VAR_NOCMDOPT,
  "Whether or not to use asynchronous IO.",
  NULL, NULL, TRUE);

static struct st_mysql_sys_var* archive_system_variables[]= {
  MYSQL_SYSVAR(aio),
  NULL
};

struct st_mysql_storage_engine archive_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };

mysql_declare_plugin(archive)
{
  MYSQL_STORAGE_ENGINE_PLUGIN,
  &archive_storage_engine,
  "ARCHIVE",
  "Brian Aker, MySQL AB",
  "Archive storage engine",
  PLUGIN_LICENSE_GPL,
  archive_db_init,            /* Plugin Init */
  archive_db_done,            /* Plugin Deinit */
  0x0300 /* 3.0 */,
  NULL,                       /* status variables */
  archive_system_variables,   /* system variables */
  NULL                        /* config options */
}
mysql_declare_plugin_end;
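
/*
  Illustrative end-to-end usage once the plugin is registered (archive
  tables are insert/select only; UPDATE and DELETE are not supported):

    CREATE TABLE t1 (a INT, b VARCHAR(32)) ENGINE=ARCHIVE;
    INSERT INTO t1 VALUES (1, 'compressed row');
    SELECT * FROM t1;
    OPTIMIZE TABLE t1;   -- recompresses, see ha_archive::optimize()
*/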