  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */

#include "config.h"

#include "plugin/archive/archive_engine.h"

#include <boost/scoped_ptr.hpp>

#include "drizzled/field.h"
#include "drizzled/field/blob.h"
#include "drizzled/field/timestamp.h"
#include "plugin/myisam/myisam.h"
#include "drizzled/table.h"
#include "drizzled/session.h"

#include "ha_archive.h"

using namespace std;
using namespace drizzled;

/*
  First, if you want to understand storage engines you should look at
  ha_example.cc and ha_example.h.
*/

/* When the engine starts up set the first version */
static uint64_t global_version= 1;

// We use this to find out the state of the archive aio option.
extern bool archive_aio_state(void);

/* Variables for archive share methods */
pthread_mutex_t archive_mutex= PTHREAD_MUTEX_INITIALIZER;

/* The file extension */
#define ARZ ".arz"               // The data file
#define ARN ".ARN"               // Files used during an optimize call

static bool archive_use_aio= false;

/*
  Number of rows that will force a bulk insert.
*/
#define ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT 2

/*
  Size of header used for row
*/
#define ARCHIVE_ROW_HEADER_SIZE 4

/*
  We just implement one additional file extension.
*/
static const char *ha_archive_exts[] = {
  ARZ,
  NULL
};
class ArchiveEngine : public drizzled::plugin::StorageEngine
{
  typedef std::map<string, ArchiveShare*> ArchiveMap;
  ArchiveMap archive_open_tables;

public:
  ArchiveEngine(const string &name_arg)
    : drizzled::plugin::StorageEngine(name_arg,
                                      HTON_STATS_RECORDS_IS_EXACT |
                                      HTON_HAS_DATA_DICTIONARY),
      archive_open_tables()
  {
    table_definition_ext= ARZ;
  }

  virtual Cursor *create(TableShare &table,
                         drizzled::memory::Root *mem_root)
  {
    return new (mem_root) ha_archive(*this, table);
  }

  const char **bas_ext() const {
    return ha_archive_exts;
  }

  int doCreateTable(Session&,
                    Table& table_arg,
                    const drizzled::TableIdentifier &identifier,
                    drizzled::message::Table& proto);

  int doGetTableDefinition(Session& session,
                           const TableIdentifier &identifier,
                           drizzled::message::Table &table_proto);

  void doGetTableNames(drizzled::CachedDirectory &directory, string&, set<string>& set_of_names);

  void doGetTableIdentifiers(drizzled::CachedDirectory &directory,
                             const drizzled::SchemaIdentifier &schema_identifier,
                             drizzled::TableIdentifier::vector &set_of_identifiers);

  int doDropTable(Session&, const TableIdentifier &identifier);

  int doRenameTable(Session&, const TableIdentifier &from, const TableIdentifier &to);

  bool doDoesTableExist(Session&, const TableIdentifier &identifier);

  ArchiveShare *findOpenTable(const string table_name);
  void addOpenTable(const string &table_name, ArchiveShare *);
  void deleteOpenTable(const string &table_name);

  uint32_t max_supported_keys() const { return 1; }
  uint32_t max_supported_key_length() const { return sizeof(uint64_t); }
  uint32_t max_supported_key_part_length() const { return sizeof(uint64_t); }

  uint32_t index_flags(enum ha_key_alg) const
  {
    return HA_ONLY_WHOLE_INDEX;
  }
};

ArchiveShare *ArchiveEngine::findOpenTable(const string table_name)
{
  ArchiveMap::iterator find_iter= archive_open_tables.find(table_name);

  return (find_iter != archive_open_tables.end()) ? (*find_iter).second : NULL;
}

void ArchiveEngine::doGetTableNames(drizzled::CachedDirectory &directory,
                                    string&,
                                    set<string>& set_of_names)
{
  drizzled::CachedDirectory::Entries entries= directory.getEntries();

  for (drizzled::CachedDirectory::Entries::iterator entry_iter= entries.begin();
       entry_iter != entries.end(); ++entry_iter)
  {
    drizzled::CachedDirectory::Entry *entry= *entry_iter;
    const string *filename= &entry->filename;

    assert(filename->size());

    const char *ext= strchr(filename->c_str(), '.');

    if (ext == NULL || my_strcasecmp(system_charset_info, ext, ARZ) ||
        (filename->compare(0, strlen(TMP_FILE_PREFIX), TMP_FILE_PREFIX) == 0))
    {  }
    else
    {
      char uname[NAME_LEN + 1];
      uint32_t file_name_len;

      file_name_len= filename_to_tablename(filename->c_str(), uname, sizeof(uname));
      // TODO: Remove need for memory copy here
      uname[file_name_len - sizeof(ARZ) + 1]= '\0'; // Subtract ending, place NULL
      set_of_names.insert(uname);
    }
  }
}

int ArchiveEngine::doDropTable(Session&, const TableIdentifier &identifier)
{
  string new_path(identifier.getPath());

  new_path+= ARZ;

  int error= unlink(new_path.c_str());

  if (error != 0)
    error= errno;

  return error;
}

int ArchiveEngine::doGetTableDefinition(Session&,
                                        const TableIdentifier &identifier,
                                        drizzled::message::Table &table_proto)
{
  int error= EEXIST;
  string proto_path(identifier.getPath());
  proto_path.append(ARZ);

  boost::scoped_ptr<azio_stream> proto_stream(new azio_stream);
  char* proto_string;

  if (azopen(proto_stream.get(), proto_path.c_str(), O_RDONLY, AZ_METHOD_BLOCK) == 0)
    return HA_ERR_CRASHED_ON_USAGE;

  proto_string= (char*)malloc(sizeof(char) * proto_stream->frm_length);

  if (proto_string == NULL)
  {
    azclose(proto_stream.get());
    return ENOMEM;
  }

  azread_frm(proto_stream.get(), proto_string);

  if (table_proto.ParseFromArray(proto_string, proto_stream->frm_length) == false)
    error= HA_ERR_CRASHED_ON_USAGE;

  azclose(proto_stream.get());
  free(proto_string);

  /*
    We set the name from what we've asked for, as in RENAME TABLE for ARCHIVE
    we do not rewrite the table proto (as it's wedged in the file header).
  */
  table_proto.set_schema(identifier.getSchemaName());
  table_proto.set_name(identifier.getTableName());

  return error;
}

static ArchiveEngine *archive_engine= NULL;

/*
  Initialize the archive Cursor.
*/
static int archive_db_init(drizzled::plugin::Registry &registry)
{
  pthread_mutex_init(&archive_mutex, MY_MUTEX_INIT_FAST);

  archive_engine= new ArchiveEngine("ARCHIVE");
  registry.add(archive_engine);

  return 0;
}

/*
  Release the archive Cursor.
*/
static int archive_db_done(drizzled::plugin::Registry &registry)
{
  registry.remove(archive_engine);
  delete archive_engine;

  pthread_mutex_destroy(&archive_mutex);

  return 0;
}

ha_archive::ha_archive(drizzled::plugin::StorageEngine &engine_arg,
                       TableShare &table_arg)
  :Cursor(engine_arg, table_arg), delayed_insert(0), bulk_insert(0)
{
  /* Set our original buffer from pre-allocated memory */
  buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info);

  /* The size of the offset value we will use for position() */
  ref_length= sizeof(internal::my_off_t);
  archive_reader_open= false;
}

  memset(&archive_write, 0, sizeof(azio_stream));     /* Archive file we are working with */
  table_name.append(name);
  data_file_name.assign(table_name);
  data_file_name.append(ARZ);
  /*
    We will use this lock for rows.
  */
  pthread_mutex_init(&_mutex,MY_MUTEX_INIT_FAST);

ArchiveShare::~ArchiveShare()
{
  thr_lock_delete(&lock);
  pthread_mutex_destroy(&_mutex);
}

/*
  We need to make sure we don't reset the crashed state.
  If we open a crashed file, we need to close it as crashed unless
  it has been repaired.
*/
bool ArchiveShare::prime(uint64_t *auto_increment)
{
  boost::scoped_ptr<azio_stream> archive_tmp(new azio_stream);

  /*
    We won't open the file for anything but reading (open it for write
    and we will generate null compression writes).
  */
  if (!(azopen(archive_tmp.get(), data_file_name.c_str(), O_RDONLY,
               AZ_METHOD_BLOCK)))
    return false;

  *auto_increment= archive_tmp->auto_increment + 1;
  rows_recorded= (ha_rows)archive_tmp->rows;
  crashed= archive_tmp->dirty;

  if (version < global_version)
  {
    version_rows= rows_recorded;
    version= global_version;
  }

  azclose(archive_tmp.get());

  return true;
}

int ha_archive::free_share()
{
  ArchiveEngine *a_engine= static_cast<ArchiveEngine *>(getEngine());

  pthread_mutex_lock(&a_engine->mutex());
  if (!--share->use_count)
  {
    a_engine->deleteOpenTable(share->table_name);
  }
  pthread_mutex_unlock(&a_engine->mutex());

  return 0;
}

int ha_archive::init_archive_writer()
{
  /*
    It is expensive to open and close the data files, and since you can't
    have a gzip file that can be both read and written we keep a writer open
    that is shared among all open tables.
  */
  if (!(azopen(&(share->archive_write), share->data_file_name.c_str(),
               O_RDWR, AZ_METHOD_BLOCK)))
  {
    share->crashed= true;
    return 1;
  }
  share->archive_write_open= true;

  return 0;
}

/*
  We open the file we will read from.
*/
int ha_archive::doOpen(const TableIdentifier &identifier, int, uint32_t)
{
  int rc= 0;

  share= get_share(identifier.getPath().c_str(), &rc);

  /*
    We either fix it ourselves, or we just take it offline.
  */

  record_buffer.resize(getTable()->getShare()->getRecordLength() + ARCHIVE_ROW_HEADER_SIZE);

  lock.init(&share->_lock);

  return rc;
}

// Should never be called
int ha_archive::open(const char *, int, uint32_t)
{
  assert(0);
  return -1;
}

int ArchiveEngine::doCreateTable(Session &,
                                 Table& table_arg,
                                 const drizzled::TableIdentifier &identifier,
                                 drizzled::message::Table& proto)
{
  int error= 0;
  boost::scoped_ptr<azio_stream> create_stream(new azio_stream);
  uint64_t auto_increment_value;
  string serialized_proto;

  auto_increment_value= proto.options().auto_increment_value();

  for (uint32_t key= 0; key < table_arg.sizeKeys(); key++)
  {
    KeyInfo *pos= &table_arg.key_info[key];
    KeyPartInfo *key_part=     pos->key_part;
    KeyPartInfo *key_part_end= key_part + pos->key_parts;

    for (; key_part != key_part_end; key_part++)
    {
      Field *field= key_part->field;

      if (!(field->flags & AUTO_INCREMENT_FLAG))
        return -1;
    }
  }

  std::string named_file= identifier.getPath();
  named_file.append(ARZ);

  if (azopen(create_stream.get(), named_file.c_str(), O_CREAT|O_RDWR,
             AZ_METHOD_BLOCK) == 0)
  {
    error= errno;
    unlink(named_file.c_str());

    return(error ? error : -1);
  }

  if (proto.SerializeToString(&serialized_proto) == false)
  {
    unlink(named_file.c_str());

    return(error ? error : -1);
  }

  if (azwrite_frm(create_stream.get(), serialized_proto.c_str(),
                  serialized_proto.length()))
  {
    unlink(named_file.c_str());

    return(error ? error : -1);
  }

  if (proto.options().has_comment())
  {
    int write_length;

    write_length= azwrite_comment(create_stream.get(),
                                  proto.options().comment().c_str(),
                                  proto.options().comment().length());

    if (write_length < 0)
    {
      error= errno;
      unlink(named_file.c_str());

      return(error ? error : -1);
    }
  }

  /*
    Yes you need to do this, because the starting value
    for the autoincrement may not be zero.
  */
  create_stream->auto_increment= auto_increment_value ?
    auto_increment_value - 1 : 0;

  if (azclose(create_stream.get()))
  {
    error= errno;
    unlink(named_file.c_str());

    /* Return error number, if we got one */
    return(error ? error : -1);
  }

  return 0;
}

uint32_t ha_archive::max_row_length(const unsigned char *)
{
  uint32_t length= (uint32_t)(getTable()->getRecordLength() + getTable()->sizeFields()*2);
  length+= ARCHIVE_ROW_HEADER_SIZE;

  uint32_t *ptr, *end;
  for (ptr= getTable()->getBlobField(), end=ptr + getTable()->sizeBlobFields();
       ptr != end;
       ptr++)
  {
    length += 2 + ((Field_blob*)getTable()->getField(*ptr))->get_length();
  }

  return length;
}

unsigned int ha_archive::pack_row(unsigned char *record)
{
  unsigned char *ptr;

  if (fix_rec_buff(max_row_length(record)))
    return(HA_ERR_OUT_OF_MEM);

  /* Copy null bits */
  memcpy(&record_buffer[0], record, getTable()->getShare()->null_bytes);
  ptr= &record_buffer[0] + getTable()->getShare()->null_bytes;

  for (Field **field=getTable()->getFields() ; *field ; field++)
  {
    if (!((*field)->is_null()))
      ptr= (*field)->pack(ptr, record + (*field)->offset(record));
  }

  return((unsigned int) (ptr - &record_buffer[0]));
}
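
/*
  For orientation: the insert and optimize paths below hand the buffer that
  pack_row() just filled to real_write_row(). That function's body is not in
  this fragment; the following is a minimal sketch of the pairing, assuming
  the azio azwrite_row() call (an assumption, not the verified definition).
*/
int ha_archive::real_write_row(unsigned char *buf, azio_stream *writer)
{
  /* pack_row() fills record_buffer: row header, null bits, packed fields. */
  unsigned int r_pack_length= pack_row(buf);

  /* Hand the packed image to the compressed stream; a short write is a failure. */
  if (azwrite_row(writer, &record_buffer[0], r_pack_length) != r_pack_length)
    return -1;

  return 0;
}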

/*
  The only save for implementing start_bulk_insert() is that we could skip
  setting dirty to true each time (see the sketch after this function).
*/
int ha_archive::doInsertRecord(unsigned char *buf)
{
  int rc;
  unsigned char *read_buf= NULL;
  uint64_t temp_auto;
  unsigned char *record= getTable()->getInsertRecord();

  if (share->crashed)
    return(HA_ERR_CRASHED_ON_USAGE);

  pthread_mutex_lock(&share->mutex());

  if (share->archive_write_open == false)
    if (init_archive_writer())
      return(HA_ERR_CRASHED_ON_USAGE);

  if (getTable()->next_number_field && record == getTable()->getInsertRecord())
  {
    update_auto_increment();
    temp_auto= getTable()->next_number_field->val_int();

    /*
      We don't support decrementing auto_increment: it makes the performance
      suffer.
    */
    if (temp_auto <= share->archive_write.auto_increment &&
        getTable()->getShare()->getKeyInfo(0).flags & HA_NOSAME)
    {
      rc= HA_ERR_FOUND_DUPP_KEY;
      goto error;
    }
  }

  share->rows_recorded++;
  rc= real_write_row(buf, &(share->archive_write));

error:
  pthread_mutex_unlock(&share->mutex());

  if (read_buf)
    free((unsigned char*) read_buf);

  return(rc);
}
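
/*
  A hypothetical sketch of the start_bulk_insert() described in the comment
  above doInsertRecord(): it would only note that a batch is coming so the
  dirty flag could be set once instead of per row. This engine does not ship
  such an override; the body below is an illustration, not the verified
  implementation.
*/
void ha_archive::start_bulk_insert(ha_rows rows)
{
  /* Unknown row count, or a batch big enough to amortize the bookkeeping. */
  if (!rows || rows >= ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT)
    bulk_insert= true;
}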

/*
  Since we tell the optimizer that we have unique indexes, we scan the
  table if we get an index request.
*/
int ha_archive::index_read(unsigned char *buf, const unsigned char *key,
                           uint32_t key_len, enum ha_rkey_function find_flag)
{
  int rc;

  rc= index_read_idx(buf, active_index, key, key_len, find_flag);

  return rc;
}

int ha_archive::index_read_idx(unsigned char *buf, uint32_t index, const unsigned char *key,
                               uint32_t key_len, enum ha_rkey_function)
{
  int rc;

  current_k_offset= getTable()->getShare()->getKeyInfo(0).key_part->offset;
  current_key= key;
  current_key_len= key_len;

  rc= doStartTableScan(true);

  if (rc == 0)
  {
    /* Scan until a row matches the requested key. */
    while (!(get_row(&archive, buf)))
    {
      if (!memcmp(current_key, buf + current_k_offset, current_key_len))
        return 0;
    }
  }

  return(rc ? rc : HA_ERR_END_OF_FILE);
}

/* Reallocate buffer if needed */
bool ha_archive::fix_rec_buff(unsigned int length)
{
  record_buffer.resize(length);

  return false;
}

int ha_archive::unpack_row(azio_stream *file_to_read, unsigned char *record)
{
  unsigned int read;
  int error;
  const unsigned char *ptr;

  read= azread_row(file_to_read, &error);
  ptr= (const unsigned char *)file_to_read->row_ptr;

  if (error || read == 0)
    return -1;

  /* Copy null bits */
  memcpy(record, ptr, getTable()->getNullBytes());
  ptr+= getTable()->getNullBytes();

  for (Field **field= getTable()->getFields() ; *field ; field++)
  {
    if (!((*field)->is_null()))
      ptr= (*field)->unpack(record + (*field)->offset(getTable()->getInsertRecord()), ptr);
  }

  return 0;
}

int ha_archive::rnd_next(unsigned char *buf)
{
  int rc;

  if (!scan_rows)
    return(HA_ERR_END_OF_FILE);
  scan_rows--;

  ha_statistic_increment(&system_status_var::ha_read_rnd_next_count);
  current_position= aztell(&archive);
  rc= get_row(&archive, buf);

  getTable()->status= rc ? STATUS_NOT_FOUND : 0;

  return(rc);
}

int ha_archive::rnd_pos(unsigned char *buf, unsigned char *pos)
{
  ha_statistic_increment(&system_status_var::ha_read_rnd_next_count);
  current_position= (internal::my_off_t)internal::my_get_ptr(pos, ref_length);

  if (azseek(&archive, (size_t)current_position, SEEK_SET) == (size_t)(-1L))
    return(HA_ERR_CRASHED_ON_USAGE);

  return(get_row(&archive, buf));
}
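
/*
  For the round trip above to work, position() must have stored the aztell()
  offset that rnd_pos() reads back. That function is not part of this
  fragment; a minimal sketch of the assumed counterpart:
*/
void ha_archive::position(const unsigned char *)
{
  /* Save the current row's stream offset into ref for a later rnd_pos(). */
  internal::my_store_ptr(ref, ref_length, current_position);
}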

  azread_frm(&archive, proto_string);

  /* Lets create a file to contain the new data */
  std::string writer_filename= share->table_name;
  writer_filename.append(ARN);

  if (!(azopen(writer.get(), writer_filename.c_str(), O_CREAT|O_RDWR, AZ_METHOD_BLOCK)))
  {
    free(proto_string);
    return(HA_ERR_CRASHED_ON_USAGE);
  }

  azwrite_frm(writer.get(), proto_string, archive.frm_length);

  /*
    An extended rebuild is a lot more effort. We open up each row and re-record it.
  */
  rows_restored= archive.rows;

  for (uint64_t x= 0; x < rows_restored ; x++)
  {
    rc= get_row(&archive, getTable()->getInsertRecord());

    if (rc != 0)
      break;

    real_write_row(getTable()->getInsertRecord(), writer.get());

    /*
      Long term it should be possible to optimize this so that
      it is not called on each row.
    */
    if (getTable()->found_next_number_field)
    {
      Field *field= getTable()->found_next_number_field;

      /* Since we will need to use field to translate, we need to flip its read bit */
      field->setReadSet();

      uint64_t auto_value=
        (uint64_t) field->val_int_internal(getTable()->getInsertRecord() +
                                           field->offset(getTable()->getInsertRecord()));
      if (share->archive_write.auto_increment < auto_value)
        stats.auto_increment_value=
          (share->archive_write.auto_increment= auto_value) + 1;
    }
  }

  share->rows_recorded= (ha_rows)writer->rows;

  if (rc && rc != HA_ERR_END_OF_FILE)
    goto error;

  /*
    If dirty, we lock, and then reset/flush the data.
    I found that just calling azflush() doesn't always work.
  */
  pthread_mutex_lock(&share->mutex());
  if (share->dirty == true)
  {
    azflush(&(share->archive_write), Z_SYNC_FLUSH);
    share->dirty= false;
  }
  pthread_mutex_unlock(&share->mutex());

  struct stat file_stat;  // Stat information for the data file

  stat(share->data_file_name.c_str(), &file_stat);

  stats.mean_rec_length= getTable()->getRecordLength() + buffer.alloced_length();
  stats.data_file_length= file_stat.st_size;
  stats.create_time= file_stat.st_ctime;
  stats.update_time= file_stat.st_mtime;

  if (flag & HA_STATUS_AUTO)
  {
    init_archive_reader();
    pthread_mutex_lock(&share->mutex());
    azflush(&archive, Z_SYNC_FLUSH);
    pthread_mutex_unlock(&share->mutex());
    stats.auto_increment_value= archive.auto_increment + 1;
  }

int ha_archive::check(Session* session)
{
  int rc= 0;
  const char *old_proc_info;

  old_proc_info= get_session_proc_info(session);
  set_session_proc_info(session, "Checking table");

  /* Flush any waiting data */
  pthread_mutex_lock(&share->mutex());
  azflush(&(share->archive_write), Z_SYNC_FLUSH);
  pthread_mutex_unlock(&share->mutex());

  /*
    Now we will rewind the archive file so that we are positioned at the
    start of the file.
  */
  init_archive_reader();
  azflush(&archive, Z_SYNC_FLUSH);
  read_data_header(&archive);
  for (uint64_t x= 0; x < share->archive_write.rows; x++)
  {
    rc= get_row(&archive, getTable()->getInsertRecord());

    if (rc != 0)
      break;
  }

int ArchiveEngine::doRenameTable(Session&, const TableIdentifier &from, const TableIdentifier &to)
{
  int error= 0;

  for (const char **ext= bas_ext(); *ext ; ext++)
  {
    if (rename_file_ext(from.getPath().c_str(), to.getPath().c_str(), *ext))
    {
      if ((error= errno) != ENOENT)
        break;
      error= 0;
    }
  }

  return error;
}

bool ArchiveEngine::doDoesTableExist(Session&,
                                     const TableIdentifier &identifier)
{
  string proto_path(identifier.getPath());
  proto_path.append(ARZ);

  if (access(proto_path.c_str(), F_OK))
    return false;

  return true;
}

void ArchiveEngine::doGetTableIdentifiers(drizzled::CachedDirectory &directory,
                                          const drizzled::SchemaIdentifier &schema_identifier,
                                          drizzled::TableIdentifier::vector &set_of_identifiers)
{
  drizzled::CachedDirectory::Entries entries= directory.getEntries();

  for (drizzled::CachedDirectory::Entries::iterator entry_iter= entries.begin();
       entry_iter != entries.end(); ++entry_iter)
  {
    drizzled::CachedDirectory::Entry *entry= *entry_iter;
    const string *filename= &entry->filename;

    assert(filename->size());

    const char *ext= strchr(filename->c_str(), '.');

    if (ext == NULL || my_strcasecmp(system_charset_info, ext, ARZ) ||
        (filename->compare(0, strlen(TMP_FILE_PREFIX), TMP_FILE_PREFIX) == 0))
    {  }
    else
    {
      char uname[NAME_LEN + 1];
      uint32_t file_name_len;

      file_name_len= TableIdentifier::filename_to_tablename(filename->c_str(), uname, sizeof(uname));
      // TODO: Remove need for memory copy here
      uname[file_name_len - sizeof(ARZ) + 1]= '\0'; // Subtract ending, place NULL
      set_of_identifiers.push_back(TableIdentifier(schema_identifier, uname));
    }
  }
}

archive_record_buffer *ha_archive::create_record_buffer(unsigned int length)
{
  archive_record_buffer *r;

  if (!(r= (archive_record_buffer*) malloc(sizeof(archive_record_buffer))))
    return NULL;

  r->length= (int)length;

  if (!(r->buffer= (unsigned char*) malloc(r->length)))
  {
    free((char*) r);
    return NULL;
  }

  return r;
}

void ha_archive::destroy_record_buffer(archive_record_buffer *r)
{
  free((char*) r->buffer);
  free((char*) r);
}

static DRIZZLE_SYSVAR_BOOL(aio, archive_use_aio,
                           PLUGIN_VAR_NOCMDOPT,
                           "Whether or not to use asynchronous IO.",
                           /* check/update callbacks and default assumed */
                           NULL, NULL, true);

static drizzle_sys_var* archive_system_variables[]= {
  DRIZZLE_SYSVAR(aio),
  NULL
};
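
/*
  A plausible definition for the archive_aio_state() hook declared near the
  top of this file: it would simply report the sysvar above. An assumption
  for illustration; the real definition is not part of this fragment.
*/
bool archive_aio_state(void)
{
  return archive_use_aio;
}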
DRIZZLE_DECLARE_PLUGIN
{
  DRIZZLE_VERSION_ID,
  "ARCHIVE",
  "3.5",
  "Brian Aker, MySQL AB",
  "Archive storage engine",
  PLUGIN_LICENSE_GPL,
  archive_db_init,            /* Plugin Init */
  archive_db_done,            /* Plugin Deinit */
  NULL,                       /* status variables */
  archive_system_variables,   /* system variables */
  NULL                        /* config options */
}
DRIZZLE_DECLARE_PLUGIN_END;