You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#include <boost/scoped_ptr.hpp>
#include "drizzled/server_includes.h"
#include "drizzled/field.h"
#include "drizzled/field/blob.h"
#include "drizzled/field/timestamp.h"
#include "plugin/myisam/myisam.h"
#include "drizzled/table.h"
#include "drizzled/session.h"
#include <mysys/my_dir.h>
#include "ha_archive.h"
using namespace std;
using namespace drizzled;
/*
  First, if you want to understand storage engines you should look at
  ha_example.cc and ha_example.h.
*/
/* When the engine starts up set the first version */
static uint64_t global_version= 1;
// We use this to find out the state of the archive aio option.
extern bool archive_aio_state(void);
/* Variables for archive share methods */
pthread_mutex_t archive_mutex= PTHREAD_MUTEX_INITIALIZER;
/* The file extension */
#define ARZ ".arz" // The data file
#define ARN ".ARN" // Files used during an optimize call
static bool archive_use_aio= false;

/*
  Number of rows that will force a bulk insert.
*/
#define ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT 2

#define ARCHIVE_ROW_HEADER_SIZE 4
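
/*
  Illustrative sketch (not part of the original engine): max_row_length()
  below already folds the ARCHIVE_ROW_HEADER_SIZE framing into its estimate
  of header + null bytes + worst-case packed fields. The helper just
  restates that arithmetic; its name is hypothetical.
*/
static inline size_t archive_row_buffer_bytes(size_t max_packed_length)
{
  /* ARCHIVE_ROW_HEADER_SIZE bytes of framing, then the packed payload */
  return ARCHIVE_ROW_HEADER_SIZE + max_packed_length;
}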

/*
  We just implement one additional file extension.
*/
static const char *ha_archive_exts[] = {
  ARZ,
  NULL
};

class ArchiveEngine : public drizzled::plugin::StorageEngine
{
  typedef std::map<string, ArchiveShare*> ArchiveMap;
  ArchiveMap archive_open_tables;

public:
  ArchiveEngine(const string &name_arg)
    : drizzled::plugin::StorageEngine(name_arg,
                                      HTON_FILE_BASED |
                                      HTON_STATS_RECORDS_IS_EXACT |
                                      HTON_HAS_RECORDS |
                                      HTON_HAS_DATA_DICTIONARY),
      archive_open_tables()
  {
    table_definition_ext= ARZ;
  }

  virtual Cursor *create(TableShare &table,
                         memory::Root *mem_root)
  {
    return new (mem_root) ha_archive(*this, table);
  }

  const char **bas_ext() const {
    return ha_archive_exts;
  }

  int doCreateTable(Session &session,
                    Table &table_arg,
                    const drizzled::identifier::Table &identifier,
                    drizzled::message::Table &proto);

  int doGetTableDefinition(Session &session,
                           const drizzled::identifier::Table &identifier,
                           drizzled::message::Table &table_proto);

  int doDropTable(Session &session, const drizzled::identifier::Table &identifier);
  int doRenameTable(Session &session, const drizzled::identifier::Table &from,
                    const drizzled::identifier::Table &to);
  bool doDoesTableExist(Session &session, const drizzled::identifier::Table &identifier);
  void doGetTableIdentifiers(drizzled::CachedDirectory &directory,
                             const drizzled::identifier::Schema &schema_identifier,
                             drizzled::identifier::Table::vector &set_of_identifiers);

  ArchiveShare *findOpenTable(const string table_name);
  void addOpenTable(const string &table_name, ArchiveShare *);
  void deleteOpenTable(const string &table_name);

  /* Engine-wide lock guarding the open-table map. */
  pthread_mutex_t &mutex()
  {
    return archive_mutex;
  }

  uint32_t max_supported_keys() const { return 1; }
  uint32_t max_supported_key_length() const { return sizeof(uint64_t); }
  uint32_t max_supported_key_part_length() const { return sizeof(uint64_t); }

  uint32_t index_flags(enum ha_key_alg) const
  {
    return HA_ONLY_WHOLE_INDEX;
  }
};

ArchiveShare *ArchiveEngine::findOpenTable(const string table_name)
{
  ArchiveMap::iterator find_iter=
    archive_open_tables.find(table_name);

  if (find_iter != archive_open_tables.end())
    return (*find_iter).second;

  return NULL;
}

int ArchiveEngine::doDropTable(Session&, const identifier::Table &identifier)
{
  string new_path(identifier.getPath());

  new_path+= ARZ;

  int error= unlink(new_path.c_str());

  if (error != 0)
    error= errno;

  return error;
}
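
/*
  Illustrative sketch (not original code): ARCHIVE keeps exactly one data
  file per table, so dropping or locating a table is just path + extension
  arithmetic. The helper name is hypothetical.
*/
static inline string archive_data_file_path(const string &table_path)
{
  return table_path + ARZ;  /* e.g. "schema/t1" -> "schema/t1.arz" */
}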
int ArchiveEngine::doGetTableDefinition(Session&,
                                        const drizzled::identifier::Table &identifier,
                                        drizzled::message::Table &table_proto)
{
  int error= ENOENT;
  string proto_path(identifier.getPath());
  proto_path.append(ARZ);

  if (access(proto_path.c_str(), F_OK))
    return errno;

  error= EEXIST;

  boost::scoped_ptr<azio_stream> proto_stream(new azio_stream);
  char* proto_string;

  if (azopen(proto_stream.get(), proto_path.c_str(), O_RDONLY, AZ_METHOD_BLOCK) == 0)
    return HA_ERR_CRASHED_ON_USAGE;

  proto_string= (char*)malloc(sizeof(char) * proto_stream->frm_length);
  if (proto_string == NULL)
  {
    azclose(proto_stream.get());
    return ENOMEM;
  }

  azread_frm(proto_stream.get(), proto_string);

  if (table_proto.ParseFromArray(proto_string, proto_stream->frm_length) == false)
    error= HA_ERR_CRASHED_ON_USAGE;

  azclose(proto_stream.get());
  free(proto_string);

  /*
    We set the name from what we've asked for as in RENAME TABLE for ARCHIVE
    we do not rewrite the table proto (as it's wedged in the file header).
  */
  table_proto.set_schema(identifier.getSchemaName());
  table_proto.set_name(identifier.getTableName());

  return error;
}
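
/*
  Round-trip note (sketch): doCreateTable() below serializes the table
  proto into the az file header with azwrite_frm(); doGetTableDefinition()
  above reads it back with azread_frm(). Because the proto is wedged into
  the file header, RENAME TABLE only moves the file and never rewrites it.
*/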
static ArchiveEngine *archive_engine= NULL;

/*
  Initialize the archive Cursor.
*/
static int archive_db_init(drizzled::plugin::Registry &registry)
{
  pthread_mutex_init(&archive_mutex, MY_MUTEX_INIT_FAST);
  archive_engine= new ArchiveEngine("ARCHIVE");
  registry.add(archive_engine);

  return false;
}

/*
  Release the archive Cursor.
*/
static int archive_db_done(drizzled::plugin::Registry &registry)
{
  registry.remove(archive_engine);
  delete archive_engine;
  pthread_mutex_destroy(&archive_mutex);

  return 0;
}

ha_archive::ha_archive(drizzled::plugin::StorageEngine &engine_arg,
                       TableShare &table_arg)
  :Cursor(engine_arg, table_arg), delayed_insert(0), bulk_insert(0)
{
  /* Set our original buffer from pre-allocated memory */
  buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info);

  /* The size of the offset value we will use for position() */
  ref_length= sizeof(internal::my_off_t);
  archive_reader_open= false;
}

ArchiveShare::ArchiveShare(const char *name)
{
  memset(&archive_write, 0, sizeof(azio_stream)); /* Archive file we are working with */
  table_name.append(name);
  data_file_name.assign(table_name);
  data_file_name.append(ARZ);

  /*
    We will use this lock for rows.
  */
  pthread_mutex_init(&_mutex, MY_MUTEX_INIT_FAST);
  thr_lock_init(&_lock);
}

ArchiveShare::~ArchiveShare()
{
  thr_lock_delete(&_lock);
  pthread_mutex_destroy(&_mutex);
}

/*
  We need to make sure we don't reset the crashed state.
  If we open a crashed file, we need to close it as crashed unless
  it has been repaired.
*/
bool ArchiveShare::prime(uint64_t *auto_increment)
{
  boost::scoped_ptr<azio_stream> archive_tmp(new azio_stream);

  /*
    We read the meta file, but do not mark it dirty. Since we are not
    doing a write we won't mark it dirty (and we won't open it for
    anything but reading... open it for write and we will generate null
    compression writes).
  */
  if (!(azopen(archive_tmp.get(), data_file_name.c_str(), O_RDONLY,
               AZ_METHOD_BLOCK)))
    return false;

  *auto_increment= archive_tmp->auto_increment + 1;
  rows_recorded= (ha_rows)archive_tmp->rows;
  crashed= archive_tmp->dirty;

  if (version < global_version)
  {
    version_rows= rows_recorded;
    version= global_version;
  }

  azclose(archive_tmp.get());

  return true;
}

int ha_archive::free_share()
{
  ArchiveEngine *a_engine= static_cast<ArchiveEngine *>(getEngine());

  pthread_mutex_lock(&a_engine->mutex());
  if (!--share->use_count)
  {
    a_engine->deleteOpenTable(share->table_name);
    delete share;
  }
  pthread_mutex_unlock(&a_engine->mutex());

  return 0;
}
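
/*
  Share lifecycle sketch (illustrative, not original code): every opened
  cursor pairs a get_share() with a free_share(); the engine's map plus
  use_count keep one ArchiveShare (and one shared az writer) per table:

    pthread_mutex_lock(&a_engine->mutex());
    share= a_engine->findOpenTable(table_name);  // NULL -> new ArchiveShare + addOpenTable()
    share->use_count++;
    pthread_mutex_unlock(&a_engine->mutex());
    ...
    free_share();  // --use_count reaching 0 -> deleteOpenTable() + delete
*/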

/*
  It is expensive to open and close the data files and since you can't have
  a gzip file that can be both read and written we keep a writer open
  that is shared among all open tables.
*/
int ha_archive::init_archive_writer()
{
  if (!(azopen(&(share->archive_write), share->data_file_name.c_str(),
               O_RDWR, AZ_METHOD_BLOCK)))
  {
    share->crashed= true;
    return 1;
  }

  share->archive_write_open= true;

  return 0;
}

/*
  We open the file we will read from.
*/
int ha_archive::doOpen(const identifier::Table &identifier, int, uint32_t)
{
  int rc= 0;

  share= get_share(identifier.getPath().c_str(), &rc);

  /*
    We either fix it ourselves, or we just take it offline.
  */
  if (rc == HA_ERR_CRASHED_ON_USAGE)
  {
    free_share();
    return rc;
  }
  else if (rc == HA_ERR_OUT_OF_MEM)
  {
    return rc;
  }

  record_buffer.resize(getTable()->getShare()->getRecordLength() + ARCHIVE_ROW_HEADER_SIZE);

  lock.init(&share->_lock);

  return rc;
}

// Should never be called
int ha_archive::open(const char *, int, uint32_t)
{
  assert(0);

  return -1;
}

int ArchiveEngine::doCreateTable(Session &,
                                 Table& table_arg,
                                 const drizzled::identifier::Table &identifier,
                                 drizzled::message::Table& proto)
{
  int error= 0;
  boost::scoped_ptr<azio_stream> create_stream(new azio_stream);
  uint64_t auto_increment_value;
  string serialized_proto;

  auto_increment_value= proto.options().auto_increment_value();

  for (uint32_t key= 0; key < table_arg.sizeKeys(); key++)
  {
    KeyInfo *pos= &table_arg.key_info[key];
    KeyPartInfo *key_part= pos->key_part;
    KeyPartInfo *key_part_end= key_part + pos->key_parts;

    for (; key_part != key_part_end; key_part++)
    {
      Field *field= key_part->field;

      if (!(field->flags & AUTO_INCREMENT_FLAG))
      {
        return -1;
      }
    }
  }

  std::string named_file= identifier.getPath();
  named_file.append(ARZ);

  if (azopen(create_stream.get(), named_file.c_str(), O_CREAT|O_RDWR,
             AZ_METHOD_BLOCK) == 0)
  {
    error= errno;
    unlink(named_file.c_str());

    return(error ? error : -1);
  }

  proto.SerializeToString(&serialized_proto);

  if (serialized_proto.empty())
  {
    unlink(named_file.c_str());

    return(error ? error : -1);
  }

  if (azwrite_frm(create_stream.get(), serialized_proto.c_str(),
                  serialized_proto.length()))
  {
    unlink(named_file.c_str());

    return(error ? error : -1);
  }

  if (proto.options().has_comment())
  {
    int write_length;

    write_length= azwrite_comment(create_stream.get(),
                                  proto.options().comment().c_str(),
                                  proto.options().comment().length());

    if (write_length < 0)
    {
      error= errno;
      unlink(named_file.c_str());

      return(error ? error : -1);
    }
  }

  /*
    Yes you need to do this, because the starting value
    for the autoincrement may not be zero.
  */
  create_stream->auto_increment= auto_increment_value ?
    auto_increment_value - 1 : 0;

  if (azclose(create_stream.get()))
  {
    error= errno;
    unlink(named_file.c_str());

    /* Return error number, if we got one */
    return(error ? error : -1);
  }

  return(0);
}
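
/*
  Resulting .arz layout (sketch of the calls above): a single az-compressed
  file whose header carries the serialized table proto (azwrite_frm), an
  optional table comment (azwrite_comment) and the auto_increment seed,
  followed by whatever packed rows the shared writer appends later.
*/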

uint32_t ha_archive::max_row_length(const unsigned char *)
{
  uint32_t length= (uint32_t)(getTable()->getRecordLength() + getTable()->sizeFields()*2);
  length+= ARCHIVE_ROW_HEADER_SIZE;

  uint32_t *ptr, *end;
  for (ptr= getTable()->getBlobField(), end=ptr + getTable()->sizeBlobFields();
       ptr != end;
       ptr++)
  {
    length += 2 + ((Field_blob*)getTable()->getField(*ptr))->get_length();
  }

  return length;
}

unsigned int ha_archive::pack_row(unsigned char *record)
{
  unsigned char *ptr;

  if (fix_rec_buff(max_row_length(record)))
    return(HA_ERR_OUT_OF_MEM);

  /* Copy null bits */
  memcpy(&record_buffer[0], record, getTable()->getShare()->null_bytes);
  ptr= &record_buffer[0] + getTable()->getShare()->null_bytes;

  for (Field **field=getTable()->getFields() ; *field ; field++)
  {
    if (!((*field)->is_null()))
      ptr= (*field)->pack(ptr, record + (*field)->offset(record));
  }

  return((unsigned int) (ptr - &record_buffer[0]));
}
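
/*
  Packed row layout produced by pack_row() (sketch):

    [null bytes][packed field 0][packed field 1]...

  A NULL field is represented only by its bit in the null bytes and
  contributes no payload, so the packed length is usually well under
  max_row_length()'s estimate.
*/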

/*
  Look at ha_archive::open() for an explanation of the row format.
  Here we just write out the row.

  Wondering about start_bulk_insert()? We don't implement it for
  archive since it optimizes for lots of writes. The only save
  for implementing start_bulk_insert() is that we could skip
  setting dirty to true each time.
*/
int ha_archive::doInsertRecord(unsigned char *buf)
{
  int rc;
  unsigned char *read_buf= NULL;
  uint64_t temp_auto;
  unsigned char *record= getTable()->getInsertRecord();

  if (share->crashed)
    return(HA_ERR_CRASHED_ON_USAGE);

  ha_statistic_increment(&system_status_var::ha_write_count);
  pthread_mutex_lock(&share->mutex());

  if (share->archive_write_open == false)
    if (init_archive_writer())
      return(HA_ERR_CRASHED_ON_USAGE);

  if (getTable()->next_number_field && record == getTable()->getInsertRecord())
  {
    update_auto_increment();
    temp_auto= getTable()->next_number_field->val_int();

    /*
      We don't support decrementing auto_increment. They make the performance
      just cry.
    */
    if (temp_auto <= share->archive_write.auto_increment &&
        getTable()->getShare()->getKeyInfo(0).flags & HA_NOSAME)
    {
      rc= HA_ERR_FOUND_DUPP_KEY;
      goto error;
    }
    else if (temp_auto > share->archive_write.auto_increment)
    {
      stats.auto_increment_value=
        (share->archive_write.auto_increment= temp_auto) + 1;
    }
  }

  share->rows_recorded++;
  rc= real_write_row(buf, &(share->archive_write));

error:
  pthread_mutex_unlock(&share->mutex());
  if (read_buf)
    free((unsigned char*) read_buf);

  return(rc);
}

/*
  We don't maintain a real index structure; since we tell
  the optimizer that we have unique indexes, we scan
  the data file for the matching key.
*/
int ha_archive::index_read(unsigned char *buf, const unsigned char *key,
                           uint32_t key_len, enum ha_rkey_function)
{
  int rc;

  current_k_offset= getTable()->getShare()->getKeyInfo(0).key_part->offset;
  current_key= key;
  current_key_len= key_len;

  rc= doStartTableScan(true);
  if (rc)
    return(rc);

  while (!(rc= get_row(&archive, buf)))
  {
    if (!memcmp(current_key, buf + current_k_offset, current_key_len))
    {
      rc= 0;
      break;
    }
  }

  return(rc);
}

/* Reallocate buffer if needed */
bool ha_archive::fix_rec_buff(unsigned int length)
{
  record_buffer.resize(length);

  return false;
}

int ha_archive::unpack_row(azio_stream *file_to_read, unsigned char *record)
{
  unsigned int read;
  int error;
  const unsigned char *ptr;

  read= azread_row(file_to_read, &error);
  ptr= (const unsigned char *)file_to_read->row_ptr;

  if (error || read == 0)
    return(-1);

  /* Copy null bits */
  memcpy(record, ptr, getTable()->getNullBytes());
  ptr+= getTable()->getNullBytes();

  for (Field **field= getTable()->getFields() ; *field ; field++)
  {
    if (!((*field)->is_null()))
    {
      ptr= (*field)->unpack(record + (*field)->offset(getTable()->getInsertRecord()), ptr);
    }
  }
  return(0);
}

int ha_archive::rnd_next(unsigned char *buf)
{
  int rc;

  if (share->crashed)
    return(HA_ERR_CRASHED_ON_USAGE);

  if (!scan_rows)
    return(HA_ERR_END_OF_FILE);
  scan_rows--;

  ha_statistic_increment(&system_status_var::ha_read_rnd_next_count);
  current_position= aztell(&archive);
  rc= get_row(&archive, buf);

  getTable()->status=rc ? STATUS_NOT_FOUND: 0;

  return(rc);
}

int ha_archive::rnd_pos(unsigned char *buf, unsigned char *pos)
{
  ha_statistic_increment(&system_status_var::ha_read_rnd_next_count);
  current_position= (internal::my_off_t)internal::my_get_ptr(pos, ref_length);
  if (azseek(&archive, (size_t)current_position, SEEK_SET) == (size_t)(-1L))
    return(HA_ERR_CRASHED_ON_USAGE);
  return(get_row(&archive, buf));
}
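
/*
  position()/rnd_pos() round trip (sketch): position() is expected to save
  the aztell() offset of the current row into ref via
  internal::my_store_ptr(), and rnd_pos() above decodes it with
  internal::my_get_ptr() and azseek()s straight back to that row.
*/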

/*
  The table can become fragmented after deletes and updates; optimize()
  rebuilds the data by rewriting every row into a fresh file.
*/
int ha_archive::optimize()
{
  int rc= 0;
  boost::scoped_ptr<azio_stream> writer(new azio_stream);
  char *proto_string;

  init_archive_reader();

  proto_string= (char*)malloc(sizeof(char) * archive.frm_length);
  if (proto_string == NULL)
    return ENOMEM;

  azread_frm(&archive, proto_string);

  /* Lets create a file to contain the new data */
  std::string writer_filename= share->table_name;
  writer_filename.append(ARN);

  if (!(azopen(writer.get(), writer_filename.c_str(), O_CREAT|O_RDWR, AZ_METHOD_BLOCK)))
  {
    free(proto_string);
    return(HA_ERR_CRASHED_ON_USAGE);
  }

  azwrite_frm(writer.get(), proto_string, archive.frm_length);

  /*
    An extended rebuild is a lot more effort. We open up each row and re-record it.
  */
  uint64_t rows_restored;
  rows_restored= archive.rows;

  for (uint64_t x= 0; x < rows_restored ; x++)
  {
    rc= get_row(&archive, getTable()->getInsertRecord());

    if (rc != 0)
      break;

    real_write_row(getTable()->getInsertRecord(), writer.get());

    /*
      Long term it should be possible to optimize this so that
      it is not called on each row.
    */
    if (getTable()->found_next_number_field)
    {
      Field *field= getTable()->found_next_number_field;

      /* Since we will need to use field to translate, we need to flip its read bit */
      field->setReadSet();

      uint64_t auto_value=
        (uint64_t) field->val_int_internal(getTable()->getInsertRecord() +
                                           field->offset(getTable()->getInsertRecord()));
      if (share->archive_write.auto_increment < auto_value)
        stats.auto_increment_value=
          (share->archive_write.auto_increment= auto_value) + 1;
    }
  }

  share->rows_recorded= (ha_rows)writer->rows;

  azclose(writer.get());
  free(proto_string);

  if (rc && rc != HA_ERR_END_OF_FILE)
    return(rc);

  // make the file we just wrote be our data file
  return internal::my_rename(writer_filename.c_str(), share->data_file_name.c_str(), MYF(0));
}

int ha_archive::info(uint32_t flag)
{
  /*
    If dirty, we lock, and then reset/flush the data.
    I found that just calling azflush() doesn't always work.
  */
  pthread_mutex_lock(&share->mutex());
  if (share->dirty == true)
  {
    azflush(&(share->archive_write), Z_SYNC_FLUSH);
    share->rows_recorded= share->archive_write.rows;
    share->dirty= false;
  }
  pthread_mutex_unlock(&share->mutex());

  stats.records= share->rows_recorded;

  struct stat file_stat;  // Stat information for the data file

  stat(share->data_file_name.c_str(), &file_stat);

  stats.mean_rec_length= getTable()->getRecordLength()+ buffer.alloced_length();
  stats.data_file_length= file_stat.st_size;
  stats.create_time= file_stat.st_ctime;
  stats.update_time= file_stat.st_mtime;

  if (flag & HA_STATUS_AUTO)
  {
    init_archive_reader();
    pthread_mutex_lock(&share->mutex());
    azflush(&archive, Z_SYNC_FLUSH);
    pthread_mutex_unlock(&share->mutex());
    stats.auto_increment_value= archive.auto_increment + 1;
  }

  return 0;
}

int ha_archive::check(Session* session)
{
  int rc= 0;
  const char *old_proc_info;

  old_proc_info= session->get_proc_info();
  session->set_proc_info("Checking table");

  /* Flush any waiting data */
  pthread_mutex_lock(&share->mutex());
  azflush(&(share->archive_write), Z_SYNC_FLUSH);
  pthread_mutex_unlock(&share->mutex());

  /*
    Now we will rewind the archive file so that we are positioned at the
    start of the file.
  */
  init_archive_reader();
  azflush(&archive, Z_SYNC_FLUSH);
  read_data_header(&archive);
  for (uint64_t x= 0; x < share->archive_write.rows; x++)
  {
    rc= get_row(&archive, getTable()->getInsertRecord());

    if (rc != 0)
      break;
  }

  session->set_proc_info(old_proc_info);

  if ((rc && rc != HA_ERR_END_OF_FILE))
  {
    share->crashed= false;
    return(HA_ADMIN_CORRUPT);
  }

  return(HA_ADMIN_OK);
}

int ArchiveEngine::doRenameTable(Session&, const identifier::Table &from, const identifier::Table &to)
{
  int error= 0;

  for (const char **ext= bas_ext(); *ext ; ext++)
  {
    if (rename_file_ext(from.getPath().c_str(), to.getPath().c_str(), *ext))
    {
      if ((error=errno) != ENOENT)
        break;
      error= 0;
    }
  }

  return error;
}

bool ArchiveEngine::doDoesTableExist(Session&,
                                     const identifier::Table &identifier)
{
  string proto_path(identifier.getPath());
  proto_path.append(ARZ);

  if (access(proto_path.c_str(), F_OK))
  {
    return false;
  }

  return true;
}

void ArchiveEngine::doGetTableIdentifiers(drizzled::CachedDirectory &directory,
                                          const drizzled::identifier::Schema &schema_identifier,
                                          drizzled::identifier::Table::vector &set_of_identifiers)
{
  drizzled::CachedDirectory::Entries entries= directory.getEntries();

  for (drizzled::CachedDirectory::Entries::iterator entry_iter= entries.begin();
       entry_iter != entries.end(); ++entry_iter)
  {
    drizzled::CachedDirectory::Entry *entry= *entry_iter;
    const string *filename= &entry->filename;

    assert(filename->size());

    const char *ext= strchr(filename->c_str(), '.');

    if (ext == NULL || my_strcasecmp(system_charset_info, ext, ARZ) ||
        (filename->compare(0, strlen(TMP_FILE_PREFIX), TMP_FILE_PREFIX) == 0))
    {
      continue;
    }

    char uname[NAME_LEN + 1];
    uint32_t file_name_len;

    file_name_len= identifier::Table::filename_to_tablename(filename->c_str(), uname, sizeof(uname));
    // TODO: Remove need for memory copy here
    uname[file_name_len - sizeof(ARZ) + 1]= '\0'; // Subtract ending, place NULL
    set_of_identifiers.push_back(identifier::Table(schema_identifier, uname));
  }
}

static DRIZZLE_SYSVAR_BOOL(aio, archive_use_aio,
  PLUGIN_VAR_NOCMDOPT,
  "Whether or not to use asynchronous IO.",
  NULL, NULL, true);

static drizzle_sys_var* archive_system_variables[]= {
  DRIZZLE_SYSVAR(aio),
  NULL
};
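
/*
  Sketch (assumes azio offers AZ_METHOD_AIO; only AZ_METHOD_BLOCK appears in
  this file): the sysvar above is what archive_aio_state() reports, and a
  reader would pick its IO method roughly like

    az_method method= archive_aio_state() ? AZ_METHOD_AIO : AZ_METHOD_BLOCK;
    azopen(&archive, share->data_file_name.c_str(), O_RDONLY, method);
*/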

DRIZZLE_DECLARE_PLUGIN
{
  DRIZZLE_VERSION_ID,
  "ARCHIVE",
  "3.5",
  "Brian Aker, MySQL AB",
  "Archive storage engine",
  PLUGIN_LICENSE_GPL,
  archive_db_init, /* Plugin Init */
  archive_db_done, /* Plugin Deinit */
  NULL,                       /* status variables */
  archive_system_variables,   /* system variables */
  NULL                        /* config options */
}
DRIZZLE_DECLARE_PLUGIN_END;