@@ -13 +13 @@
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#ifdef USE_PRAGMA_IMPLEMENTATION
+#pragma implementation        // gcc: Class implementation
-#include <drizzled/common_includes.h>
-#include <storage/myisam/myisam.h>
+#include "mysql_priv.h"
 #include "ha_archive.h"
+#include <mysql/plugin.h>
@@ -23 +29 @@
   First, if you want to understand storage engines you should look at
@@ -135 +141 @@
   Used for hash table that tracks open tables.
-static unsigned char* archive_get_key(ARCHIVE_SHARE *share, size_t *length,
-                                      bool not_used __attribute__((unused)))
+static uchar* archive_get_key(ARCHIVE_SHARE *share, size_t *length,
+                              my_bool not_used __attribute__((unused)))
   *length=share->table_name_length;
-  return (unsigned char*) share->table_name;
+  return (uchar*) share->table_name;
@@ -157 +163 @@
 int archive_db_init(void *p)
+  DBUG_ENTER("archive_db_init");
   handlerton *archive_hton;
   archive_hton= (handlerton *)p;
   archive_hton->state= SHOW_OPTION_YES;
+  archive_hton->db_type= DB_TYPE_ARCHIVE_DB;
   archive_hton->create= archive_create_handler;
   archive_hton->flags= HTON_NO_FLAGS;
   archive_hton->discover= archive_discover;
@@ -172 +180 @@
   if (hash_init(&archive_open_tables, system_charset_info, 32, 0, 0,
                 (hash_get_key) archive_get_key, 0, 0))
-    pthread_mutex_destroy(&archive_mutex);
+    VOID(pthread_mutex_destroy(&archive_mutex));
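The hash_init() call above wires archive_get_key() in as a key-extraction callback: the open-tables hash stores whole ARCHIVE_SHARE objects and asks the callback for each element's key bytes when probing. A minimal self-contained C++ model of that callback pattern, assuming nothing from mysys (the Share type and find() helper are illustrative only, not server API):

    #include <cstddef>
    #include <cstring>
    #include <vector>

    struct Share { const char *table_name; size_t table_name_length; };

    // mysys-style callback: given a stored element, expose its key bytes.
    static const unsigned char *share_get_key(const Share *share, size_t *length)
    {
      *length= share->table_name_length;
      return (const unsigned char*) share->table_name;
    }

    // Lookup loop standing in for hash_search(): compare key bytes the way
    // the hash would after calling the registered get_key callback.
    static Share *find(std::vector<Share*> &elems, const char *key, size_t len)
    {
      for (Share *s : elems)
      {
        size_t elen;
        const unsigned char *ekey= share_get_key(s, &elen);
        if (elen == len && memcmp(ekey, key, len) == 0)
          return s;
      }
      return NULL;
    }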
@@ -211 +219 @@
   /* The size of the offset value we will use for position() */
   ref_length= sizeof(my_off_t);
-  archive_reader_open= false;
+  archive_reader_open= FALSE;
@@ -216 +224 @@
-int archive_discover(handlerton *hton __attribute__((unused)),
-                     THD* thd __attribute__((unused)),
+int archive_discover(handlerton *hton, THD* thd, const char *db,
                      const char *name,
-                     unsigned char **frmblob,
+  DBUG_ENTER("archive_discover");
+  DBUG_PRINT("archive_discover", ("db: %s, name: %s", db, name));
   azio_stream frm_stream;
   char az_file[FN_REFLEN];
@@ -230 +238 @@
   if (stat(az_file, &file_stat))
-  if (!(azopen(&frm_stream, az_file, O_RDONLY, AZ_METHOD_BLOCK)))
+  if (!(azopen(&frm_stream, az_file, O_RDONLY|O_BINARY, AZ_METHOD_BLOCK)))
     if (errno == EROFS || errno == EACCES)
-      return(my_errno= errno);
-    return(HA_ERR_CRASHED_ON_USAGE);
+      DBUG_RETURN(my_errno= errno);
+    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
   if (frm_stream.frm_length == 0)
@@ -278 +288 @@
 ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, int *rc)
+  DBUG_ENTER("ha_archive::get_share");
   pthread_mutex_lock(&archive_mutex);
   length=(uint) strlen(table_name);
   if (!(share=(ARCHIVE_SHARE*) hash_search(&archive_open_tables,
-                                           (unsigned char*) table_name,
@@ -292 +303 @@
     if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
                          &share, sizeof(*share),
                          &tmp_name, length+1,
       pthread_mutex_unlock(&archive_mutex);
       *rc= HA_ERR_OUT_OF_MEM;
     share->use_count= 0;
     share->table_name_length= length;
     share->table_name= tmp_name;
-    share->crashed= false;
-    share->archive_write_open= false;
+    share->crashed= FALSE;
+    share->archive_write_open= FALSE;
     fn_format(share->data_file_name, table_name, "",
               ARZ, MY_REPLACE_EXT | MY_UNPACK_FILENAME);
-    my_stpcpy(share->table_name, table_name);
+    strmov(share->table_name, table_name);
+    DBUG_PRINT("ha_archive", ("Data File %s",
+                              share->data_file_name));
       We will use this lock for rows.
-    pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST);
+    VOID(pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST));
@@ -316 +329 @@
       We read the meta file, but do not mark it dirty. Since we are not
       anything but reading... open it for write and we will generate null
       compression writes).
-    if (!(azopen(&archive_tmp, share->data_file_name, O_RDONLY,
+    if (!(azopen(&archive_tmp, share->data_file_name, O_RDONLY|O_BINARY,
                  AZ_METHOD_BLOCK)))
-      pthread_mutex_destroy(&share->mutex);
+      VOID(pthread_mutex_destroy(&share->mutex));
       pthread_mutex_unlock(&archive_mutex);
       *rc= HA_ERR_CRASHED_ON_REPAIR;
     stats.auto_increment_value= archive_tmp.auto_increment + 1;
     share->rows_recorded= (ha_rows)archive_tmp.rows;
@@ -338 +351 @@
     azclose(&archive_tmp);
-    my_hash_insert(&archive_open_tables, (unsigned char*) share);
+    VOID(my_hash_insert(&archive_open_tables, (uchar*) share));
     thr_lock_init(&share->lock);
   share->use_count++;
+  DBUG_PRINT("ha_archive", ("archive table %.*s has %d open handles now",
+                            share->table_name_length, share->table_name,
   if (share->crashed)
     *rc= HA_ERR_CRASHED_ON_USAGE;
   pthread_mutex_unlock(&archive_mutex);
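get_share() is the classic shared-handler pattern: every open handler for one table shares a single ARCHIVE_SHARE, looked up in the global hash under archive_mutex, created on first open, and reference-counted so the last close tears it down. A self-contained sketch of that life cycle, with std::map standing in for the mysys HASH (types and names are illustrative, not the server API):

    #include <map>
    #include <string>
    #include <pthread.h>

    struct Share { int use_count; bool crashed; std::string name; };

    static pthread_mutex_t cache_mutex= PTHREAD_MUTEX_INITIALIZER;
    static std::map<std::string, Share*> open_shares;

    Share *get_share(const std::string &name)
    {
      pthread_mutex_lock(&cache_mutex);
      Share *&s= open_shares[name];
      if (!s)                          // first opener creates the share
        s= new Share{0, false, name};
      s->use_count++;
      pthread_mutex_unlock(&cache_mutex);
      return s;
    }

    void free_share(Share *s)
    {
      pthread_mutex_lock(&cache_mutex);
      if (--s->use_count == 0)         // last handler removes and frees it
      {
        open_shares.erase(s->name);
        delete s;
      }
      pthread_mutex_unlock(&cache_mutex);
    }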
@@ -356 +372 @@
 int ha_archive::free_share()
+  DBUG_ENTER("ha_archive::free_share");
+  DBUG_PRINT("ha_archive",
+             ("archive table %.*s has %d open handles on entrance",
+              share->table_name_length, share->table_name,
   pthread_mutex_lock(&archive_mutex);
   if (!--share->use_count)
-    hash_delete(&archive_open_tables, (unsigned char*) share);
+    hash_delete(&archive_open_tables, (uchar*) share);
     thr_lock_delete(&share->lock);
-    pthread_mutex_destroy(&share->mutex);
+    VOID(pthread_mutex_destroy(&share->mutex));
@@ -367 +388 @@
       We need to make sure we don't reset the crashed state.
       If we open a crashed file, we need to close it as crashed unless
       Since we will close the data down after this, we go on and count
       the flush on close;
-    if (share->archive_write_open == true)
+    if (share->archive_write_open == TRUE)
       if (azclose(&(share->archive_write)))
-    free((unsigned char*) share);
+    my_free((uchar*) share, MYF(0));
   pthread_mutex_unlock(&archive_mutex);
@@ -385 +406 @@
 int ha_archive::init_archive_writer()
+  DBUG_ENTER("ha_archive::init_archive_writer");
     It is expensive to open and close the data files and since you can't have
     a gzip file that can be both read and written we keep a writer open
     that is shared among all open tables.
   if (!(azopen(&(share->archive_write), share->data_file_name,
-               O_RDWR, AZ_METHOD_BLOCK)))
+               O_RDWR|O_BINARY, AZ_METHOD_BLOCK)))
-    share->crashed= true;
+    DBUG_PRINT("ha_archive", ("Could not open archive write file"));
+    share->crashed= TRUE;
-  share->archive_write_open= true;
+  share->archive_write_open= TRUE;
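The comment above is the key design constraint: a gzip stream cannot be open for reading and writing at once, so one append handle is opened lazily and then shared by every handler on the table. A sketch of that lazy-open guard, using FILE* as a stand-in for the azio_stream writer (the caller must hold the share mutex, as write_row() does before checking archive_write_open):

    #include <cstdio>

    struct WriterShare {
      FILE *writer;               // stands in for share->archive_write
      bool writer_open;
      bool crashed;
      const char *data_file_name;
    };

    // Caller holds the share mutex; open the shared writer on first use.
    int init_writer(WriterShare *share)
    {
      if (share->writer_open)
        return 0;
      if (!(share->writer= fopen(share->data_file_name, "ab")))
      {
        share->crashed= true;     // an open failure marks the table crashed
        return 1;
      }
      share->writer_open= true;
      return 0;
    }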
@@ -407 +430 @@
 int ha_archive::init_archive_reader()
+  DBUG_ENTER("ha_archive::init_archive_reader");
     It is expensive to open and close the data files and since you can't have
     a gzip file that can be both read and written we keep a writer open
     that is shared among all open tables.
-  if (archive_reader_open == false)
+  if (archive_reader_open == FALSE)
     az_method method;
     switch (archive_use_aio)
       method= AZ_METHOD_BLOCK;
       method= AZ_METHOD_AIO;
       method= AZ_METHOD_BLOCK;
-    if (!(azopen(&archive, share->data_file_name, O_RDONLY,
+    if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY,
-      share->crashed= true;
+      DBUG_PRINT("ha_archive", ("Could not open archive read file"));
+      share->crashed= TRUE;
-    archive_reader_open= true;
+    archive_reader_open= TRUE;
@@ -460 +485 @@
   We open the file we will read from.
-int ha_archive::open(const char *name,
-                     int mode __attribute__((unused)),
-                     uint32_t open_options)
+int ha_archive::open(const char *name, int mode, uint open_options)
+  DBUG_ENTER("ha_archive::open");
+  DBUG_PRINT("ha_archive", ("archive table was opened for crash: %s",
+                            (open_options & HA_OPEN_FOR_REPAIR) ? "yes" : "no"));
   share= get_share(name, &rc);
   if (rc == HA_ERR_CRASHED_ON_USAGE && !(open_options & HA_OPEN_FOR_REPAIR))
     /* purecov: begin inspected */
     /* purecov: end */
   else if (rc == HA_ERR_OUT_OF_MEM)
@@ -483 +510 @@
   record_buffer= create_record_buffer(table->s->reclength +
                                       ARCHIVE_ROW_HEADER_SIZE);
   if (!record_buffer)
-    return(HA_ERR_OUT_OF_MEM);
+    DBUG_RETURN(HA_ERR_OUT_OF_MEM);
   thr_lock_data_init(&share->lock, &lock, NULL);
+  DBUG_PRINT("ha_archive", ("archive table was crashed %s",
+                            rc == HA_ERR_CRASHED_ON_USAGE ? "yes" : "no"));
   if (rc == HA_ERR_CRASHED_ON_USAGE && open_options & HA_OPEN_FOR_REPAIR)
@@ -554 +584 @@
   azio_stream create_stream;            /* Archive file we are working with */
   File frm_file;                   /* File handler for readers */
   struct stat file_stat;
-  unsigned char *frm_ptr;
+  DBUG_ENTER("ha_archive::create");
   stats.auto_increment_value= create_info->auto_increment_value;
-  for (uint32_t key= 0; key < table_arg->sizeKeys(); key++)
+  for (uint key= 0; key < table_arg->s->keys; key++)
     KEY *pos= table_arg->key_info+key;
     KEY_PART_INFO *key_part=     pos->key_part;
@@ -582 +615 @@
   if (create_info->data_file_name && create_info->data_file_name[0] != '#')
+    DBUG_PRINT("ha_archive", ("archive will create stream file %s",
+                              create_info->data_file_name));
     fn_format(name_buff, create_info->data_file_name, "", ARZ,
               MY_REPLACE_EXT | MY_UNPACK_FILENAME);
     fn_format(linkname, name, "", ARZ,
@@ -620 +656 @@
     if (fstat(frm_file, &file_stat))
-      frm_ptr= (unsigned char *)my_malloc(sizeof(unsigned char) * file_stat.st_size, MYF(0));
+      frm_ptr= (uchar *)my_malloc(sizeof(uchar) * file_stat.st_size, MYF(0));
       my_read(frm_file, frm_ptr, file_stat.st_size, MYF(0));
       azwrite_frm(&create_stream, (char *)frm_ptr, file_stat.st_size);
-      free((unsigned char*)frm_ptr);
+      my_free((uchar*)frm_ptr, MYF(0));
     my_close(frm_file, MYF(0));
@@ -655 +688 @@
+  DBUG_PRINT("ha_archive", ("Creating File %s", name_buff));
+  DBUG_PRINT("ha_archive", ("Creating Link %s", linkname));
   delete_table(name);
   /* Return error number, if we got one */
-  return(error ? error : -1);
+  DBUG_RETURN(error ? error : -1);
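Note what the frm block above does: create() embeds the table's .frm definition inside the archive itself (azwrite_frm()), which is what later lets archive_discover() resurrect the table from nothing but the .ARZ file. A self-contained sketch of that embed step using POSIX I/O; write_blob here is a hypothetical stand-in for azwrite_frm():

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>
    #include <cstdlib>

    // Read the whole definition file and hand it to the archive writer.
    bool embed_frm(const char *frm_path,
                   int (*write_blob)(const char *buf, size_t len))
    {
      int fd= open(frm_path, O_RDONLY);
      if (fd < 0)
        return true;
      struct stat st;
      if (fstat(fd, &st))
      {
        close(fd);
        return true;
      }
      char *buf= (char*) malloc(st.st_size);
      bool err= !buf ||
                read(fd, buf, st.st_size) != (ssize_t) st.st_size ||
                write_blob(buf, (size_t) st.st_size) != 0;
      free(buf);
      close(fd);
      return err;
    }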
@@ -662 +702 @@
   This is where the actual row is written out.
-int ha_archive::real_write_row(unsigned char *buf, azio_stream *writer)
+int ha_archive::real_write_row(uchar *buf, azio_stream *writer)
   my_off_t written;
   unsigned int r_pack_length;
+  DBUG_ENTER("ha_archive::real_write_row");
   /* We pack the row for writing */
   r_pack_length= pack_row(buf);
@@ -687 +731 @@
   the bytes required for the length in the header.
-uint32_t ha_archive::max_row_length(const unsigned char *buf __attribute__((unused)))
+uint32 ha_archive::max_row_length(const uchar *buf)
-  uint32_t length= (uint32_t)(table->getRecordLength() + table->sizeFields()*2);
+  uint32 length= (uint32)(table->s->reclength + table->s->fields*2);
   length+= ARCHIVE_ROW_HEADER_SIZE;
-  for (ptr= table->getBlobField(), end=ptr + table->sizeBlobFields();
+  for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ;
@@ -707 +751 @@
-unsigned int ha_archive::pack_row(unsigned char *record)
+unsigned int ha_archive::pack_row(uchar *record)
+  DBUG_ENTER("ha_archive::pack_row");
   if (fix_rec_buff(max_row_length(record)))
-    return(HA_ERR_OUT_OF_MEM); /* purecov: inspected */
+    DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* purecov: inspected */
   /* Copy null bits */
   memcpy(record_buffer->buffer, record, table->s->null_bytes);
   ptr= (*field)->pack(ptr, record + (*field)->offset(record));
-  return((unsigned int) (ptr - record_buffer->buffer));
+  DBUG_PRINT("ha_archive",("Pack row length %u", (unsigned int)
+                           (ptr - record_buffer->buffer -
+                            ARCHIVE_ROW_HEADER_SIZE)));
+  DBUG_RETURN((unsigned int) (ptr - record_buffer->buffer));
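So a packed ARCHIVE row is simply the record's null-bit bytes copied verbatim, followed by each non-null field's packed image (Field::pack() adds a length prefix for variable-length types). A self-contained model of that layout, with a plain 4-byte length prefix standing in for whatever Field::pack() would emit:

    #include <cstdint>
    #include <string>
    #include <vector>

    // Pack a row image: null bitmap first, then each field's bytes with a
    // simple length prefix (illustrative stand-in for Field::pack()).
    std::vector<unsigned char> pack_row(const unsigned char *null_bytes,
                                        size_t null_byte_count,
                                        const std::vector<std::string> &fields)
    {
      std::vector<unsigned char> out(null_bytes, null_bytes + null_byte_count);
      for (const std::string &f : fields)
      {
        uint32_t len= (uint32_t) f.size();
        out.insert(out.end(), (unsigned char*)&len, (unsigned char*)&len + 4);
        out.insert(out.end(), f.begin(), f.end());
      }
      return out;
    }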
@@ -734 +785 @@
   for implementing start_bulk_insert() is that we could skip
   setting dirty to true each time.
-int ha_archive::write_row(unsigned char *buf)
+int ha_archive::write_row(uchar *buf)
-  unsigned char *read_buf= NULL;
+  uchar *read_buf= NULL;
   uint64_t temp_auto;
-  unsigned char *record= table->record[0];
+  uchar *record= table->record[0];
+  DBUG_ENTER("ha_archive::write_row");
   if (share->crashed)
-    return(HA_ERR_CRASHED_ON_USAGE);
+    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
   ha_statistic_increment(&SSV::ha_write_count);
   if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
     table->timestamp_field->set_time();
   pthread_mutex_lock(&share->mutex);
-  if (share->archive_write_open == false)
+  if (share->archive_write_open == FALSE)
     if (init_archive_writer())
-      return(HA_ERR_CRASHED_ON_USAGE);
+      DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
   if (table->next_number_field && record == table->record[0])
@@ -833 +885 @@
   pthread_mutex_unlock(&share->mutex);
-  free((unsigned char*) read_buf);
+  my_free((uchar*) read_buf, MYF(0));
@@ -841 +893 @@
-void ha_archive::get_auto_increment(uint64_t offset __attribute__((unused)),
-                                    uint64_t increment __attribute__((unused)),
-                                    uint64_t nb_desired_values __attribute__((unused)),
-                                    uint64_t *first_value __attribute__((unused)),
-                                    uint64_t *nb_reserved_values __attribute__((unused)))
+void ha_archive::get_auto_increment(uint64_t offset, uint64_t increment,
+                                    uint64_t nb_desired_values,
+                                    uint64_t *first_value,
+                                    uint64_t *nb_reserved_values)
-  *nb_reserved_values= UINT64_MAX;
+  *nb_reserved_values= ULONGLONG_MAX;
   *first_value= share->archive_write.auto_increment + 1;
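The auto-increment contract here is deliberately simple: hand out one past the highest value the writer has seen, and report an unbounded reservation so the server never calls back for more values within the statement. As a minimal sketch of the same logic outside the handler class:

    #include <cstdint>

    struct Writer { uint64_t auto_increment; };   // mirrors share->archive_write

    void get_auto_increment(const Writer *w, uint64_t *first_value,
                            uint64_t *nb_reserved_values)
    {
      *nb_reserved_values= UINT64_MAX;   // "reserve everything"
      *first_value= w->auto_increment + 1;
    }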
@@ -851 +902 @@
 /* Initialized at each key walk (called multiple times unlike rnd_init()) */
-int ha_archive::index_init(uint32_t keynr, bool sorted __attribute__((unused)))
+int ha_archive::index_init(uint keynr, bool sorted)
+  DBUG_ENTER("ha_archive::index_init");
   active_index= keynr;
@@ -860 +912 @@
   No indexes, so if we get a request for an index search since we tell
   the optimizer that we have unique indexes, we scan
-int ha_archive::index_read(unsigned char *buf, const unsigned char *key,
-                           uint32_t key_len, enum ha_rkey_function find_flag)
+int ha_archive::index_read(uchar *buf, const uchar *key,
+                           uint key_len, enum ha_rkey_function find_flag)
+  DBUG_ENTER("ha_archive::index_read");
   rc= index_read_idx(buf, active_index, key, key_len, find_flag);
@@ -872 +925 @@
-int ha_archive::index_read_idx(unsigned char *buf, uint32_t index, const unsigned char *key,
-                               enum ha_rkey_function find_flag __attribute__((unused)))
+int ha_archive::index_read_idx(uchar *buf, uint index, const uchar *key,
+                               uint key_len, enum ha_rkey_function find_flag)
@@ -901 +956 @@
-  return(rc ? rc : HA_ERR_END_OF_FILE);
+  DBUG_RETURN(rc ? rc : HA_ERR_END_OF_FILE);
@@ -905 +960 @@
-int ha_archive::index_next(unsigned char * buf)
+int ha_archive::index_next(uchar * buf)
+  DBUG_ENTER("ha_archive::index_next");
   while (!(get_row(&archive, buf)))
     if (!memcmp(current_key, buf+current_k_offset, current_key_len))
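Since ARCHIVE keeps no index structure, a "key read" degenerates into a full scan that memcmp()s the requested key bytes against each returned row at the key column's offset, exactly as the index_next() loop above does. A self-contained sketch of that filter loop (next_row is a hypothetical stand-in for get_row()):

    #include <cstddef>
    #include <cstring>

    // Scan forward until a row matches the key bytes at key_offset.
    bool index_scan(bool (*next_row)(unsigned char *buf),
                    unsigned char *buf,
                    const unsigned char *key, size_t key_len, size_t key_offset)
    {
      while (next_row(buf))
        if (memcmp(key, buf + key_offset, key_len) == 0)
          return true;          // positioned on a matching row
      return false;             // HA_ERR_END_OF_FILE in the real code
    }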
@@ -927 +984 @@
 int ha_archive::rnd_init(bool scan)
+  DBUG_ENTER("ha_archive::rnd_init");
   if (share->crashed)
-    return(HA_ERR_CRASHED_ON_USAGE);
+    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
   init_archive_reader();
   /* We rewind the file so that we can read from the beginning if scan */
+    DBUG_PRINT("info", ("archive will retrieve %llu rows",
+                        (unsigned long long) scan_rows));
   if (read_data_header(&archive))
-    return(HA_ERR_CRASHED_ON_USAGE);
+    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
@@ -946 +1008 @@
   This is the method that is used to read a row. It assumes that the row is
   positioned where you want it.
-int ha_archive::get_row(azio_stream *file_to_read, unsigned char *buf)
+int ha_archive::get_row(azio_stream *file_to_read, uchar *buf)
+  DBUG_ENTER("ha_archive::get_row");
+  DBUG_PRINT("ha_archive", ("Picking version for get_row() %d -> %d",
+                            (uchar)file_to_read->version,
   if (file_to_read->version == ARCHIVE_VERSION)
     rc= get_row_version3(file_to_read, buf);
+  DBUG_PRINT("ha_archive", ("Return %d\n", rc));
@@ -961 +1028 @@
 /* Reallocate buffer if needed */
 bool ha_archive::fix_rec_buff(unsigned int length)
-  assert(record_buffer->buffer);
+  DBUG_ENTER("ha_archive::fix_rec_buff");
+  DBUG_PRINT("ha_archive", ("Fixing %u for %u",
+                            length, record_buffer->length));
+  DBUG_ASSERT(record_buffer->buffer);
   if (length > record_buffer->length)
-    unsigned char *newptr;
-    if (!(newptr=(unsigned char*) my_realloc((unsigned char*) record_buffer->buffer,
+    if (!(newptr=(uchar*) my_realloc((uchar*) record_buffer->buffer,
                                      MYF(MY_ALLOW_ZERO_PTR))))
     record_buffer->buffer= newptr;
     record_buffer->length= length;
-  assert(length <= record_buffer->length);
+  DBUG_ASSERT(length <= record_buffer->length);
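fix_rec_buff() keeps a grow-only scratch buffer: the common row costs no allocation, and realloc happens only when a new maximum row size appears. A self-contained sketch of the same idiom (C realloc() standing in for my_realloc() with MY_ALLOW_ZERO_PTR):

    #include <cstdlib>

    struct RecordBuffer { unsigned char *buffer; unsigned int length; };

    // Returns true on out-of-memory, mirroring the handler convention.
    bool fix_rec_buff(RecordBuffer *rb, unsigned int length)
    {
      if (length > rb->length)
      {
        unsigned char *newptr= (unsigned char*) realloc(rb->buffer, length);
        if (!newptr)
          return true;          // old buffer is still valid on failure
        rb->buffer= newptr;
        rb->length= length;     // only ever grows
      }
      return false;
    }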
@@ -982 +1052 @@
-int ha_archive::unpack_row(azio_stream *file_to_read, unsigned char *record)
+int ha_archive::unpack_row(azio_stream *file_to_read, uchar *record)
+  DBUG_ENTER("ha_archive::unpack_row");
   unsigned int read;
-  const unsigned char *ptr;
   read= azread_row(file_to_read, &error);
-  ptr= (const unsigned char *)file_to_read->row_ptr;
+  ptr= (const uchar *)file_to_read->row_ptr;
   if (error || read == 0)
   /* Copy null bits */
-  memcpy(record, ptr, table->getNullBytes());
-  ptr+= table->getNullBytes();
+  memcpy(record, ptr, table->s->null_bytes);
+  ptr+= table->s->null_bytes;
   for (Field **field=table->field ; *field ; field++)
     if (!((*field)->is_null()))
       ptr= (*field)->unpack(record + (*field)->offset(table->record[0]), ptr);
@@ -1010 +1082 @@
-int ha_archive::get_row_version3(azio_stream *file_to_read, unsigned char *buf)
+int ha_archive::get_row_version3(azio_stream *file_to_read, uchar *buf)
+  DBUG_ENTER("ha_archive::get_row_version3");
   int returnable= unpack_row(file_to_read, buf);
+  DBUG_RETURN(returnable);
@@ -1020 +1094 @@
   or by having had ha_archive::rnd_pos() called before it is called.
-int ha_archive::rnd_next(unsigned char *buf)
+int ha_archive::rnd_next(uchar *buf)
+  DBUG_ENTER("ha_archive::rnd_next");
   if (share->crashed)
-    return(HA_ERR_CRASHED_ON_USAGE);
+    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
   if (!scan_rows)
-    return(HA_ERR_END_OF_FILE);
+    DBUG_RETURN(HA_ERR_END_OF_FILE);
   ha_statistic_increment(&SSV::ha_read_rnd_next_count);
@@ -1061 +1137 @@
   correctly ordered row.
-int ha_archive::rnd_pos(unsigned char * buf, unsigned char *pos)
+int ha_archive::rnd_pos(uchar * buf, uchar *pos)
+  DBUG_ENTER("ha_archive::rnd_pos");
   ha_statistic_increment(&SSV::ha_read_rnd_next_count);
   current_position= (my_off_t)my_get_ptr(pos, ref_length);
   if (azseek(&archive, (size_t)current_position, SEEK_SET) == (size_t)(-1L))
-    return(HA_ERR_CRASHED_ON_USAGE);
-  return(get_row(&archive, buf));
+    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+  DBUG_RETURN(get_row(&archive, buf));
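A row "position" in ARCHIVE is nothing more than the row's byte offset in the compressed stream (which is why ref_length is sizeof(my_off_t) in the constructor): position() saves it into ref, and rnd_pos() reads it back and seeks before re-fetching the row. A sketch of the pair, with memcpy() standing in for my_store_ptr()/my_get_ptr() and fseek() for azseek():

    #include <cstdio>
    #include <cstring>

    void save_position(unsigned char *ref, long offset)
    {
      memcpy(ref, &offset, sizeof(offset));   // position(): store the offset
    }

    int seek_to_position(FILE *f, const unsigned char *ref)
    {
      long offset;
      memcpy(&offset, ref, sizeof(offset));   // rnd_pos(): recover the offset
      return fseek(f, offset, SEEK_SET);      // then re-read the row from here
    }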
@@ -1078 +1155 @@
 int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt)
+  DBUG_ENTER("ha_archive::repair");
   check_opt->flags= T_EXTEND;
   int rc= optimize(thd, check_opt);
-    return(HA_ERR_CRASHED_ON_REPAIR);
+    DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR);
-  share->crashed= false;
+  share->crashed= FALSE;
@@ -1091 +1169 @@
   The table can become fragmented if data was inserted, read, and then
   inserted again. What we do is open up the file and recompress it completely.
-int ha_archive::optimize(THD* thd __attribute__((unused)),
-                         HA_CHECK_OPT* check_opt __attribute__((unused)))
+int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
+  DBUG_ENTER("ha_archive::optimize");
   azio_stream writer;
   char writer_filename[FN_REFLEN];
@@ -1104 +1182 @@
   if (share->archive_write_open)
     azclose(&(share->archive_write));
-    share->archive_write_open= false;
+    share->archive_write_open= FALSE;
   /* Let's create a file to contain the new data */
   fn_format(writer_filename, share->table_name, "", ARN,
             MY_REPLACE_EXT | MY_UNPACK_FILENAME);
-  if (!(azopen(&writer, writer_filename, O_CREAT|O_RDWR, AZ_METHOD_BLOCK)))
-    return(HA_ERR_CRASHED_ON_USAGE);
+  if (!(azopen(&writer, writer_filename, O_CREAT|O_RDWR|O_BINARY, AZ_METHOD_BLOCK)))
+    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
   An extended rebuild is a lot more effort. We open up each row and re-record it.
@@ -1167 +1248 @@
     (share->archive_write.auto_increment= auto_value) + 1;
+  dbug_tmp_restore_column_map(table->read_set, org_bitmap);
   share->rows_recorded= (ha_rows)writer.rows;
+  DBUG_PRINT("info", ("recovered %llu archive rows",
+                      (unsigned long long)share->rows_recorded));
+  DBUG_PRINT("ha_archive", ("recovered %llu archive rows",
+                            (unsigned long long)share->rows_recorded));
   if (rc && rc != HA_ERR_END_OF_FILE)
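optimize() therefore rebuilds the table by streaming every row out of the old archive into a freshly created .ARN file at full compression and taking rows_recorded from the new writer; the rebuilt file then replaces the original. A stripped-down sketch of that copy loop, with stdio streams standing in for the azio reader and writer and a chunked read standing in for the per-row get_row()/real_write_row() loop:

    #include <cstdio>

    // Recompress by copying everything from the old stream to the new one.
    int rebuild(FILE *reader, FILE *writer)
    {
      unsigned char buf[4096];
      size_t n;
      while ((n= fread(buf, 1, sizeof(buf), reader)) > 0)
      {
        if (fwrite(buf, 1, n, writer) != n)
          return -1;                  // a write failure aborts the rebuild
      }
      return ferror(reader) ? -1 : 0; // caller renames the new file over the old
    }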
@@ -1200 +1289 @@
                                        enum thr_lock_type lock_type)
   if (lock_type == TL_WRITE_DELAYED)
-    delayed_insert= true;
+    delayed_insert= TRUE;
-    delayed_insert= false;
+    delayed_insert= FALSE;
   if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
@@ -1210 +1299 @@
       Here is where we get into the guts of a row level lock.
       If TL_UNLOCK is set
-      If we are not doing a LOCK Table or DISCARD/IMPORT
+      If we are not doing a LOCK TABLE or DISCARD/IMPORT
       TABLESPACE, then allow multiple writers
@@ -1248 +1339 @@
   if (!(my_readlink(share->real_path, share->data_file_name, MYF(0))))
     create_info->data_file_name= share->real_path;
@@ -1256 +1347 @@
   Hints for optimizer, see ha_tina for more information
-int ha_archive::info(uint32_t flag)
+int ha_archive::info(uint flag)
+  DBUG_ENTER("ha_archive::info");
     If dirty, we lock, and then reset/flush the data.
     I found that just calling azflush() doesn't always work.
   pthread_mutex_lock(&share->mutex);
-  if (share->dirty == true)
+  if (share->dirty == TRUE)
+    DBUG_PRINT("ha_archive", ("archive flushing out rows for scan"));
     azflush(&(share->archive_write), Z_SYNC_FLUSH);
     share->rows_recorded= share->archive_write.rows;
-    share->dirty= false;
+    share->dirty= FALSE;
   if (share->version < global_version)
     share->version_rows= share->rows_recorded;
@@ -1285 +1379 @@
   scan_rows= stats.records;
   stats.deleted= 0;
+  DBUG_PRINT("ha_archive", ("Stats rows is %d\n", (int)stats.records));
   /* Costs quite a bit more to get all information */
   if (flag & HA_STATUS_TIME)
     struct stat file_stat;  // Stat information for the data file
-    stat(share->data_file_name, &file_stat);
+    VOID(stat(share->data_file_name, &file_stat));
-    stats.mean_rec_length= table->getRecordLength()+ buffer.alloced_length();
+    stats.mean_rec_length= table->s->reclength + buffer.alloced_length();
     stats.data_file_length= file_stat.st_size;
     stats.create_time= file_stat.st_ctime;
     stats.update_time= file_stat.st_mtime;
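The HA_STATUS_TIME branch shows where most ARCHIVE statistics come from: a plain stat() of the .ARZ data file. A self-contained sketch of that fill-in (the field names mirror the handler's stats block, but the Stats struct itself is illustrative):

    #include <sys/stat.h>
    #include <ctime>

    struct Stats {
      unsigned long long data_file_length;
      time_t create_time, update_time;
    };

    int fill_time_stats(const char *data_file_name, Stats *stats)
    {
      struct stat file_stat;
      if (stat(data_file_name, &file_stat))
        return 1;
      stats->data_file_length= (unsigned long long) file_stat.st_size;
      stats->create_time= file_stat.st_ctime;   // file creation/change time
      stats->update_time= file_stat.st_mtime;   // last write to the archive
      return 0;
    }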
@@ -1355 +1453 @@
 bool ha_archive::is_crashed() const
-  return(share->crashed);
+  DBUG_ENTER("ha_archive::is_crashed");
+  DBUG_RETURN(share->crashed);
@@ -1361 +1460 @@
   Simple scan of the tables to make sure everything is ok.
-int ha_archive::check(THD* thd,
-                      HA_CHECK_OPT* check_opt __attribute__((unused)))
+int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
   const char *old_proc_info;
+  unsigned long long x;
+  DBUG_ENTER("ha_archive::check");
-  old_proc_info= get_thd_proc_info(thd);
-  set_thd_proc_info(thd, "Checking table");
+  old_proc_info= thd_proc_info(thd, "Checking table");
   /* Flush any waiting data */
   pthread_mutex_lock(&share->mutex);
   azflush(&(share->archive_write), Z_SYNC_FLUSH);
@@ -1393 +1491 @@
-  set_thd_proc_info(thd, old_proc_info);
+  thd_proc_info(thd, old_proc_info);
   if ((rc && rc != HA_ERR_END_OF_FILE))
-    share->crashed= false;
-    return(HA_ADMIN_CORRUPT);
+    share->crashed= FALSE;
+    DBUG_RETURN(HA_ADMIN_CORRUPT);
-  return(HA_ADMIN_OK);
+  DBUG_RETURN(HA_ADMIN_OK);
@@ -1409 +1507 @@
 bool ha_archive::check_and_repair(THD *thd)
   HA_CHECK_OPT check_opt;
+  DBUG_ENTER("ha_archive::check_and_repair");
   check_opt.init();
-  return(repair(thd, &check_opt));
+  DBUG_RETURN(repair(thd, &check_opt));
@@ -1418 +1517 @@
 archive_record_buffer *ha_archive::create_record_buffer(unsigned int length)
+  DBUG_ENTER("ha_archive::create_record_buffer");
   archive_record_buffer *r;
       (archive_record_buffer*) my_malloc(sizeof(archive_record_buffer),
-    return(NULL); /* purecov: inspected */
+    DBUG_RETURN(NULL); /* purecov: inspected */
   r->length= (int)length;
-  if (!(r->buffer= (unsigned char*) my_malloc(r->length,
+  if (!(r->buffer= (uchar*) my_malloc(r->length,
-    return(NULL); /* purecov: inspected */
+    my_free((char*) r, MYF(MY_ALLOW_ZERO_PTR));
+    DBUG_RETURN(NULL); /* purecov: inspected */
@@ -1439 +1539 @@
 void ha_archive::destroy_record_buffer(archive_record_buffer *r)
-  free((char*) r->buffer);
+  DBUG_ENTER("ha_archive::destroy_record_buffer");
+  my_free((char*) r->buffer, MYF(MY_ALLOW_ZERO_PTR));
+  my_free((char*) r, MYF(MY_ALLOW_ZERO_PTR));
@@ -1446 +1547 @@
-static DRIZZLE_SYSVAR_BOOL(aio, archive_use_aio,
+static MYSQL_SYSVAR_BOOL(aio, archive_use_aio,
   PLUGIN_VAR_NOCMDOPT,
   "Whether or not to use asynchronous IO.",
 static struct st_mysql_sys_var* archive_system_variables[]= {
-  DRIZZLE_SYSVAR(aio),
+struct st_mysql_storage_engine archive_storage_engine=
+{ MYSQL_HANDLERTON_INTERFACE_VERSION };
 mysql_declare_plugin(archive)
-  DRIZZLE_STORAGE_ENGINE_PLUGIN,
+  MYSQL_STORAGE_ENGINE_PLUGIN,
+  &archive_storage_engine,
   "Brian Aker, MySQL AB",
   "Archive storage engine",
   PLUGIN_LICENSE_GPL,
   archive_db_init, /* Plugin Init */
   archive_db_done, /* Plugin Deinit */
   NULL,                       /* status variables                */
   archive_system_variables,   /* system variables                */
   NULL                        /* config options                  */