   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */

#include "plugin/archive/archive_engine.h"
#include <boost/scoped_ptr.hpp>
#include "drizzled/field.h"
#include "drizzled/field/blob.h"
#include "drizzled/field/timestamp.h"
#include "plugin/myisam/myisam.h"
#include "drizzled/table.h"
#include "drizzled/session.h"

using namespace std;
using namespace drizzled;

static const string engine_name("ARCHIVE");

/*
  First, if you want to understand storage engines you should look at
  ha_example.cc and ha_example.h.
*/

/* When the engine starts up set the first version */
static uint64_t global_version= 1;

// We use this to find out the state of the archive aio option.
extern bool archive_aio_state(void);

/* Variables for archive share methods */
pthread_mutex_t archive_mutex;

/* The file extension */
#define ARZ ".arz" // The data file
#define ARN ".ARN" // Files used during an optimize call

static bool archive_use_aio= false;

/*
  Number of rows that will force a bulk insert.
*/

#define ARCHIVE_ROW_HEADER_SIZE 4
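
/*
  A sketch of the implied on-disk row format (inferred from pack_row()
  and unpack_row() below, not a normative layout description):

    [ARCHIVE_ROW_HEADER_SIZE bytes : packed row length]
    [null-bits block               : one bit per nullable field]
    [packed field data             : null fields are skipped entirely]

  The whole unit is then pushed through the azio (zlib) stream, which is
  why rows can only be appended and read back sequentially.
*/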

ArchiveShare *ArchiveEngine::findOpenTable(const string table_name)
{
  ArchiveMap::iterator find_iter=
    archive_open_tables.find(table_name);

  if (find_iter != archive_open_tables.end())
    return (*find_iter).second;
  else
    return NULL;
}

void ArchiveEngine::addOpenTable(const string &table_name, ArchiveShare *share)
{
  archive_open_tables[table_name]= share;
}

void ArchiveEngine::deleteOpenTable(const string &table_name)
{
  archive_open_tables.erase(table_name);
}

int ArchiveEngine::doDropTable(Session&, const TableIdentifier &identifier)
{
  string new_path(identifier.getPath());

  new_path+= ARZ;

  int error= unlink(new_path.c_str());

  if (error != 0)
    error= errno;

  return error;
}

/*
  We just implement one additional file extension.
*/
static const char *ha_archive_exts[] = {
  ARZ,
  NULL
};
void ArchiveEngine::doGetTableNames(CachedDirectory &directory,
                                    string&,
                                    set<string>& set_of_names)
{
  CachedDirectory::Entries entries= directory.getEntries();

  for (CachedDirectory::Entries::iterator entry_iter= entries.begin();
       entry_iter != entries.end(); ++entry_iter)
  {
    CachedDirectory::Entry *entry= *entry_iter;
    string *filename= &entry->filename;

    assert(filename->size());

    const char *ext= strchr(filename->c_str(), '.');

    if (ext == NULL || my_strcasecmp(system_charset_info, ext, ARZ) ||
        is_prefix(filename->c_str(), TMP_FILE_PREFIX))
      continue;

    char uname[NAME_LEN + 1];
    uint32_t file_name_len;

    file_name_len= filename_to_tablename(filename->c_str(), uname, sizeof(uname));
    // TODO: Remove need for memory copy here
    uname[file_name_len - sizeof(ARZ) + 1]= '\0'; // Subtract ending, place NULL
    set_of_names.insert(uname);
  }
}
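
/*
  For illustration, with a hypothetical table t1: its data file is
  "t1.arz", the extension check above accepts it, and trimming
  sizeof(ARZ) - 1 trailing characters from the decoded name yields
  "t1", which is what gets inserted into set_of_names.
*/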

int ArchiveEngine::doGetTableDefinition(Session&,
                                        const TableIdentifier &identifier,
                                        drizzled::message::Table &table_proto)
{
  /* EEXIST here means the definition was found; see the callers. */
  int error= EEXIST;

  string proto_path(identifier.getPath());
  proto_path.append(ARZ);

  boost::scoped_ptr<azio_stream> proto_stream(new azio_stream);
  char* proto_string;

  if (azopen(proto_stream.get(), proto_path.c_str(), O_RDONLY, AZ_METHOD_BLOCK) == 0)
    return HA_ERR_CRASHED_ON_USAGE;

  proto_string= (char*)malloc(sizeof(char) * proto_stream->frm_length);

  if (proto_string == NULL)
  {
    azclose(proto_stream.get());
    return ENOMEM;
  }

  azread_frm(proto_stream.get(), proto_string);

  if (table_proto.ParseFromArray(proto_string, proto_stream->frm_length) == false)
    error= HA_ERR_CRASHED_ON_USAGE;

  azclose(proto_stream.get());
  free(proto_string);

  /*
    We set the name from what we've asked for, as in RENAME TABLE for ARCHIVE
    we do not rewrite the table proto (it's wedged in the file header).
  */
  table_proto.set_schema(identifier.getSchemaName());
  table_proto.set_name(identifier.getTableName());

  return error;
}
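
/*
  Unlike most engines there is no separate table-definition file: the
  serialized drizzled::message::Table is wedged into the azio header of
  the .arz data file itself, written by doCreateTable() below via
  azwrite_frm() and read back above via azread_frm().
*/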

static ArchiveEngine *archive_engine= NULL;

/*
  Initialize the archive Cursor.
*/
static int archive_db_init(drizzled::plugin::Registry &registry)
{
  pthread_mutex_init(&archive_mutex, MY_MUTEX_INIT_FAST);
  archive_engine= new ArchiveEngine(engine_name);
  registry.add(archive_engine);

  return 0;
}

/*
  Release the archive Cursor.
*/
static int archive_db_done(drizzled::plugin::Registry &registry)
{
  registry.remove(archive_engine);
  delete archive_engine;

  pthread_mutex_destroy(&archive_mutex);

  return 0;
}

ha_archive::ha_archive(drizzled::plugin::StorageEngine &engine_arg,
                       TableShare &table_arg)
  :Cursor(engine_arg, table_arg), delayed_insert(0), bulk_insert(0)
{
  /* Set our original buffer from pre-allocated memory */
  buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info);

  /* The size of the offset value we will use for position() */
  ref_length= sizeof(internal::my_off_t);
  archive_reader_open= false;
}

ArchiveShare::ArchiveShare(const char *name)
{
  memset(&archive_write, 0, sizeof(azio_stream));     /* Archive file we are working with */
  table_name.append(name);
  data_file_name.assign(table_name);
  data_file_name.append(ARZ);
  /*
    We will use this lock for rows.
  */
  pthread_mutex_init(&_mutex, MY_MUTEX_INIT_FAST);
}

ArchiveShare::~ArchiveShare()
{
  thr_lock_delete(&_lock);
  pthread_mutex_destroy(&_mutex);
  /*
    We need to make sure we don't reset the crashed state.
    If we open a crashed file, we need to close it as crashed unless
    it has been repaired. Since we will close the data down after this,
    we go on and count the flush on close.
  */
  if (archive_write_open == true)
  {
    if (azclose(&archive_write))
      rows_recorded= 0;
  }
}

bool ArchiveShare::prime(uint64_t *auto_increment)
{
  boost::scoped_ptr<azio_stream> archive_tmp(new azio_stream);

  /*
    We don't want to open the data file for anything but reading (open it
    for write and we will generate null compression writes).
  */
  if (!(azopen(archive_tmp.get(), data_file_name.c_str(), O_RDONLY,
               AZ_METHOD_BLOCK)))
    return false;

  *auto_increment= archive_tmp->auto_increment + 1;
  rows_recorded= (ha_rows)archive_tmp->rows;
  crashed= archive_tmp->dirty;

  if (version < global_version)
  {
    version_rows= rows_recorded;
    version= global_version;
  }

  azclose(archive_tmp.get());

  return true;
}
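
/*
  Note why prime() is cheap: the azio header carries the row count, the
  auto_increment high-water mark, and the dirty flag as metadata, so a
  share can be primed without touching any compressed row data. It is
  also why a crash marker survives server restarts until a repair
  rewrites the header.
*/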

ArchiveShare *ha_archive::get_share(const char *table_name, int *rc)
{
  ArchiveShare *share= NULL;
  ArchiveEngine *a_engine= static_cast<ArchiveEngine *>(getEngine());

  pthread_mutex_lock(&a_engine->mutex());

  share= a_engine->findOpenTable(table_name);

  if (share == NULL)
  {
    share= new ArchiveShare(table_name);

    if (share == NULL)
    {
      pthread_mutex_unlock(&a_engine->mutex());
      *rc= HA_ERR_OUT_OF_MEM;
      return NULL;
    }

    if (share->prime(&stats.auto_increment_value) == false)
    {
      pthread_mutex_unlock(&a_engine->mutex());
      *rc= HA_ERR_CRASHED_ON_REPAIR;
      delete share;

      return NULL;
    }

    a_engine->addOpenTable(share->table_name, share);
    thr_lock_init(&share->_lock);
  }

  share->use_count++;

  if (share->crashed)
    *rc= HA_ERR_CRASHED_ON_USAGE;

  pthread_mutex_unlock(&a_engine->mutex());

  return share;
}

/*
  We open the file we will read from.
*/
int ha_archive::doOpen(const TableIdentifier &identifier, int, uint32_t)
{
  int rc= 0;

  share= get_share(identifier.getPath().c_str(), &rc);

  /*
    We either fix it ourselves, or we just take it offline.
  */
  if (rc == HA_ERR_CRASHED_ON_REPAIR)
    rc= 0;

  record_buffer.resize(getTable()->getShare()->getRecordLength() + ARCHIVE_ROW_HEADER_SIZE);

  lock.init(&share->_lock);

  return(rc);
}

// Should never be called
int ha_archive::open(const char *, int, uint32_t)
{
  assert(0);

  return -1;
}

int ArchiveEngine::doCreateTable(Session &,
                                 Table& table_arg,
                                 const drizzled::TableIdentifier &identifier,
                                 drizzled::message::Table& proto)
{
  int error= 0;
  boost::scoped_ptr<azio_stream> create_stream(new azio_stream);
  uint64_t auto_increment_value;
  string serialized_proto;

  auto_increment_value= proto.options().auto_increment_value();

  /*
    ARCHIVE only supports an index on an AUTO_INCREMENT column, so any
    key part that is not auto-increment rejects the whole create.
  */
  for (uint32_t key= 0; key < table_arg.sizeKeys(); key++)
  {
    KEY *pos= &table_arg.key_info[key];
    KEY_PART_INFO *key_part=     pos->key_part;
    KEY_PART_INFO *key_part_end= key_part + pos->key_parts;

    for (; key_part != key_part_end; key_part++)
    {
      Field *field= key_part->field;

      if (!(field->flags & AUTO_INCREMENT_FLAG))
        return -1;
    }
  }

  std::string named_file= identifier.getPath();
  named_file.append(ARZ);

  if (azopen(create_stream.get(), named_file.c_str(), O_CREAT|O_RDWR,
             AZ_METHOD_BLOCK) == 0)
  {
    error= errno;
    unlink(named_file.c_str());

    return(error ? error : -1);
  }

  if (proto.SerializeToString(&serialized_proto) == false)
  {
    unlink(named_file.c_str());

    return(error ? error : -1);
  }

  if (azwrite_frm(create_stream.get(), serialized_proto.c_str(),
                  serialized_proto.length()))
  {
    unlink(named_file.c_str());

    return(error ? error : -1);
  }

  if (proto.options().has_comment())
  {
    int write_length;

    write_length= azwrite_comment(create_stream.get(),
                                  proto.options().comment().c_str(),
                                  proto.options().comment().length());

    if (write_length < 0)
    {
      error= errno;
      unlink(named_file.c_str());

      return(error ? error : -1);
    }
  }

  /*
    Yes you need to do this, because the starting value
    for the autoincrement may not be zero.
  */
  create_stream->auto_increment= auto_increment_value ?
    auto_increment_value - 1 : 0;

  if (azclose(create_stream.get()))
  {
    error= errno;
    unlink(named_file.c_str());

    return(error ? error : -1);
  }

  return(0);
}

uint32_t ha_archive::max_row_length(const unsigned char *)
{
  uint32_t length= (uint32_t)(getTable()->getRecordLength() + getTable()->sizeFields()*2);
  length+= ARCHIVE_ROW_HEADER_SIZE;

  uint32_t *ptr, *end;
  for (ptr= getTable()->getBlobField(), end=ptr + getTable()->sizeBlobFields();
       ptr != end ;
       ptr++)
  {
    length += 2 + ((Field_blob*)getTable()->getField(*ptr))->get_length();
  }

  return length;
}

unsigned int ha_archive::pack_row(unsigned char *record)
{
  unsigned char *ptr;

  if (fix_rec_buff(max_row_length(record)))
    return(HA_ERR_OUT_OF_MEM);

  /* Copy null bits */
  memcpy(&record_buffer[0], record, getTable()->getShare()->null_bytes);
  ptr= &record_buffer[0] + getTable()->getShare()->null_bytes;

  for (Field **field=getTable()->getFields() ; *field ; field++)
  {
    if (!((*field)->is_null()))
      ptr= (*field)->pack(ptr, record + (*field)->offset(record));
  }

  return((unsigned int) (ptr - &record_buffer[0]));
}
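
/*
  A worked example (hypothetical schema, not from the original source):
  for a row (id INT NOT NULL, note VARCHAR(10) NULL) holding (1, NULL),
  pack_row() copies the single null-bits byte (note's null bit set),
  packs only the 4-byte id, and returns 5. unpack_row() below reverses
  this by re-reading the null bits and skipping the null field, so the
  two functions must always agree on field order and null handling.
*/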

/*
  The only save for implementing start_bulk_insert() is that we could
  skip setting dirty to true each time.
*/
int ha_archive::doInsertRecord(unsigned char *buf)
{
  int rc;
  unsigned char *read_buf= NULL;
  uint64_t temp_auto;
  unsigned char *record= getTable()->getInsertRecord();

  if (share->crashed)
    return(HA_ERR_CRASHED_ON_USAGE);

  ha_statistic_increment(&system_status_var::ha_write_count);
  pthread_mutex_lock(&share->mutex());

  if (share->archive_write_open == false)
    if (init_archive_writer())
      return(HA_ERR_CRASHED_ON_USAGE);

  if (getTable()->next_number_field && record == getTable()->getInsertRecord())
  {
    update_auto_increment();
    temp_auto= getTable()->next_number_field->val_int();

    /*
      We don't support decrementing auto_increment. They make the performance
      just cry.
    */
    if (temp_auto <= share->archive_write.auto_increment &&
        getTable()->getShare()->getKeyInfo(0).flags & HA_NOSAME)
    {
      rc= HA_ERR_FOUND_DUPP_KEY;
      goto error;
    }
  }

  share->rows_recorded++;
  rc= real_write_row(buf, &(share->archive_write));

error:
  pthread_mutex_unlock(&share->mutex());
  if (read_buf)
    free((unsigned char*) read_buf);

  return(rc);
}
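
/*
  Concretely (hypothetical session, not from the original source): with
  a table t(a INT AUTO_INCREMENT, UNIQUE KEY(a)) on ARCHIVE, inserting
  an explicit a=10 and then a=3 makes the second insert fail with
  HA_ERR_FOUND_DUPP_KEY, because the engine only tracks the high-water
  mark and cannot verify uniqueness below it without a full scan.
*/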

/*
  No real index exists; since we tell the optimizer that we have unique
  indexes, we scan for the key ourselves when asked for an index read.
*/
int ha_archive::index_read(unsigned char *buf, const unsigned char *key,
                           uint32_t key_len, enum ha_rkey_function)
{
  int rc;
  bool found= 0;

  current_k_offset= getTable()->getShare()->getKeyInfo(0).key_part->offset;
  current_key= key;
  current_key_len= key_len;

  rc= doStartTableScan(true);

  if (rc)
    goto error;

  while (!(get_row(&archive, buf)))
  {
    if (!memcmp(current_key, buf + current_k_offset, current_key_len))
    {
      found= 1;
      break;
    }
  }

  if (found)
    return(0);

error:
  return(rc ? rc : HA_ERR_KEY_NOT_FOUND);
}

/* Reallocate buffer if needed */
bool ha_archive::fix_rec_buff(unsigned int length)
{
  record_buffer.resize(length);

  return false;
}

int ha_archive::unpack_row(azio_stream *file_to_read, unsigned char *record)
{
  unsigned int read;
  int error;
  const unsigned char *ptr;

  read= azread_row(file_to_read, &error);
  ptr= (const unsigned char *)file_to_read->row_ptr;

  if (error || read == 0)
    return(-1);

  /* Copy null bits */
  memcpy(record, ptr, getTable()->getNullBytes());
  ptr+= getTable()->getNullBytes();
  for (Field **field= getTable()->getFields() ; *field ; field++)
  {
    if (!((*field)->is_null()))
    {
      ptr= (*field)->unpack(record + (*field)->offset(getTable()->getInsertRecord()), ptr);
    }
  }
  return(0);
}

int ha_archive::rnd_pos(unsigned char *buf, unsigned char *pos)
{
  ha_statistic_increment(&system_status_var::ha_read_rnd_next_count);
  current_position= (internal::my_off_t)internal::my_get_ptr(pos, ref_length);
  if (azseek(&archive, (size_t)current_position, SEEK_SET) == (size_t)(-1L))
    return(HA_ERR_CRASHED_ON_USAGE);
  return(get_row(&archive, buf));
}
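
/*
  Usage sketch (hypothetical caller, not from the original source): a
  sorted scan first walks the table with rnd_next(), calling position()
  to stash the azio offset of each row in ref, and later hands that ref
  back to rnd_pos() above, which azseek()s straight to the saved offset
  and re-reads the row. ref_length is sizeof(internal::my_off_t) for
  exactly this reason (see the constructor).
*/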

/*
  The table can become fragmented if data was deleted and/or updated.
  Rebuild the data file: stream every row out of the old file into a
  fresh compressed file, then swap it in.
*/
int ha_archive::optimize()
{
  int rc= 0;
  boost::scoped_ptr<azio_stream> writer(new azio_stream);

  init_archive_reader();

  char* proto_string= (char*)malloc(sizeof(char) * archive.frm_length);
  if (proto_string == NULL)
    return ENOMEM;

  azread_frm(&archive, proto_string);

  /* Let's create a file to contain the new data */
  std::string writer_filename= share->table_name;
  writer_filename.append(ARN);

  if (!(azopen(writer.get(), writer_filename.c_str(), O_CREAT|O_RDWR, AZ_METHOD_BLOCK)))
  {
    free(proto_string);
    return(HA_ERR_CRASHED_ON_USAGE);
  }

  azwrite_frm(writer.get(), proto_string, archive.frm_length);

  /*
    An extended rebuild is a lot more effort. We open up each row and re-record it.
  */
  uint64_t rows_restored;
  rows_restored= archive.rows;

  for (uint64_t x= 0; x < rows_restored ; x++)
  {
    rc= get_row(&archive, getTable()->getInsertRecord());

    if (rc != 0)
      break;

    real_write_row(getTable()->getInsertRecord(), writer.get());

    /*
      Long term it should be possible to optimize this so that
      it is not called on each row.
    */
    if (getTable()->found_next_number_field)
    {
      Field *field= getTable()->found_next_number_field;

      /* Since we will need to use field to translate, we need to flip its read bit */
      field->setReadSet();

      uint64_t auto_value=
        (uint64_t) field->val_int_internal(getTable()->getInsertRecord() +
                                           field->offset(getTable()->getInsertRecord()));
      if (share->archive_write.auto_increment < auto_value)
        stats.auto_increment_value=
          (share->archive_write.auto_increment= auto_value) + 1;
    }
  }

  share->rows_recorded= (ha_rows)writer->rows;

  if (rc && rc != HA_ERR_END_OF_FILE)
  {
    free(proto_string);
    azclose(writer.get());

    return(HA_ERR_CRASHED_ON_USAGE);
  }

  azclose(writer.get());
  share->dirty= false;

  azclose(&archive);

  /* Make the file we just wrote be our data file */
  rc= internal::my_rename(writer_filename.c_str(), share->data_file_name.c_str(), MYF(0));

  free(proto_string);

  return(rc);
}

int ha_archive::info(uint32_t)
{
  struct stat file_stat;  // Stat information for the data file

  stat(share->data_file_name.c_str(), &file_stat);

  stats.mean_rec_length= getTable()->getRecordLength()+ buffer.alloced_length();
  stats.data_file_length= file_stat.st_size;
  stats.create_time= file_stat.st_ctime;
  stats.update_time= file_stat.st_mtime;

  return 0;
}

int ha_archive::check(Session* session)
{
  int rc= 0;
  const char *old_proc_info;

  old_proc_info= get_session_proc_info(session);
  set_session_proc_info(session, "Checking table");
  /* Flush any waiting data */
  pthread_mutex_lock(&share->mutex());
  azflush(&(share->archive_write), Z_SYNC_FLUSH);
  pthread_mutex_unlock(&share->mutex());

  /*
    Now we will rewind the archive file so that we are positioned at the
    start of the file.
  */
  init_archive_reader();
  azflush(&archive, Z_SYNC_FLUSH);

  for (uint64_t x= 0; x < share->archive_write.rows; x++)
  {
    rc= get_row(&archive, getTable()->getInsertRecord());

    if (rc != 0)
      break;
  }

  set_session_proc_info(session, old_proc_info);

  if (rc && rc != HA_ERR_END_OF_FILE)
    return(HA_ADMIN_CORRUPT);

  return(HA_ADMIN_OK);
}

int ArchiveEngine::doRenameTable(Session&, const TableIdentifier &from, const TableIdentifier &to)
{
  int error= 0;

  for (const char **ext= bas_ext(); *ext ; ext++)
  {
    if (rename_file_ext(from.getPath().c_str(), to.getPath().c_str(), *ext))
    {
      if ((error=errno) != ENOENT)
        break;
      error= 0;
    }
  }

  return error;
}
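
/*
  Note the asymmetry this creates: a rename only moves the files, so the
  table proto embedded in the .arz header still carries the old schema
  and table name. doGetTableDefinition() papers over that by overwriting
  set_schema()/set_name() from the identifier it was asked for.
*/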

bool ArchiveEngine::doDoesTableExist(Session&,
                                     const TableIdentifier &identifier)
{
  string proto_path(identifier.getPath());
  proto_path.append(ARZ);

  if (access(proto_path.c_str(), F_OK))
  {
    return false;
  }

  return true;
}

void ArchiveEngine::doGetTableIdentifiers(drizzled::CachedDirectory &directory,
                                          const drizzled::SchemaIdentifier &schema_identifier,
                                          drizzled::TableIdentifier::vector &set_of_identifiers)
{
  drizzled::CachedDirectory::Entries entries= directory.getEntries();

  for (drizzled::CachedDirectory::Entries::iterator entry_iter= entries.begin();
       entry_iter != entries.end(); ++entry_iter)
  {
    drizzled::CachedDirectory::Entry *entry= *entry_iter;
    const string *filename= &entry->filename;

    assert(filename->size());

    const char *ext= strchr(filename->c_str(), '.');

    if (ext == NULL || my_strcasecmp(system_charset_info, ext, ARZ) ||
        (filename->compare(0, strlen(TMP_FILE_PREFIX), TMP_FILE_PREFIX) == 0))
      continue;

    char uname[NAME_LEN + 1];
    uint32_t file_name_len;

    file_name_len= TableIdentifier::filename_to_tablename(filename->c_str(), uname, sizeof(uname));
    // TODO: Remove need for memory copy here
    uname[file_name_len - sizeof(ARZ) + 1]= '\0'; // Subtract ending, place NULL
    set_of_identifiers.push_back(TableIdentifier(schema_identifier, uname));
  }
}

static DRIZZLE_SYSVAR_BOOL(aio, archive_use_aio,
  PLUGIN_VAR_NOCMDOPT,
  "Whether or not to use asynchronous IO.",
  NULL, NULL, true);

static struct st_mysql_sys_var* archive_system_variables[]= {
  DRIZZLE_SYSVAR(aio),
  NULL
};

drizzle_declare_plugin
{
  "ARCHIVE",
  "3.5",
  "Brian Aker, MySQL AB",
  "Archive storage engine",
  PLUGIN_LICENSE_GPL,
  archive_db_init,            /* Plugin Init */
  archive_db_done,            /* Plugin Deinit */
  NULL,                       /* status variables */
  archive_system_variables,   /* system variables */
  NULL                        /* config options */
}
drizzle_declare_plugin_end;