  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */

#include "plugin/archive/archive_engine.h"

#include <boost/scoped_ptr.hpp>
#include "drizzled/server_includes.h"
18
#include "drizzled/field.h"
19
#include "drizzled/field/blob.h"
20
#include "drizzled/field/timestamp.h"
21
#include "plugin/myisam/myisam.h"
22
#include "drizzled/table.h"
23
#include "drizzled/session.h"
24
#include <mysys/my_dir.h>
26
#include "ha_archive.h"
24
32
using namespace std;
using namespace drizzled;

static const string engine_name("ARCHIVE");
/*
  First, if you want to understand storage engines you should look at
  ha_example.cc and ha_example.h.
*/
/* When the engine starts up set the first version */
static uint64_t global_version= 1;

// We use this to find out the state of the archive aio option.
extern bool archive_aio_state(void);
/* The file extension */
#define ARZ ".arz"               // The data file
#define ARN ".ARN"               // Files used during an optimize call

static bool archive_use_aio= false;
/*
  Number of rows that will force a bulk insert.
*/

#define ARCHIVE_ROW_HEADER_SIZE 4
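/*
  A note on the constant above: every packed row in the ".arz" stream is
  preceded by a fixed 4-byte header, so the buffer sizing done elsewhere
  in this file reserves record length + ARCHIVE_ROW_HEADER_SIZE.
*/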
ArchiveShare *ArchiveEngine::findOpenTable(const string table_name)
{
  ArchiveMap::iterator find_iter=
    archive_open_tables.find(table_name);

  if (find_iter != archive_open_tables.end())
    return (*find_iter).second;

  return NULL;
}
void ArchiveEngine::addOpenTable(const string &table_name, ArchiveShare *share)
{
  archive_open_tables[table_name]= share;
}

void ArchiveEngine::deleteOpenTable(const string &table_name)
{
  archive_open_tables.erase(table_name);
}
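/*
  The three helpers above maintain the engine-wide cache of ArchiveShare
  objects, keyed by table path. Callers serialize access through the
  engine's mutex; see ha_archive::get_share() below, which holds
  a_engine->mutex() around findOpenTable()/addOpenTable().
*/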
int ArchiveEngine::doDropTable(Session&, const TableIdentifier &identifier)
{
  string new_path(identifier.getPath());
  new_path+= ARZ;

  int error= unlink(new_path.c_str());
  if (error != 0)
    error= my_errno= errno;

  return error;
}

/*
  We just implement one additional file extension.
*/
static const char *ha_archive_exts[] = {
  ARZ,
  NULL
};
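/*
  A minimal usage sketch (hypothetical table and column names). ARCHIVE
  keeps everything for a table in the single compressed ".arz" file named
  above, and doCreateTable() below rejects any key that is not on an
  auto-increment column:

    CREATE TABLE t (id BIGINT AUTO_INCREMENT PRIMARY KEY, msg TEXT)
        ENGINE=ARCHIVE;
    INSERT INTO t (msg) VALUES ('hello');  -- row appended to t.arz
    SELECT * FROM t;                       -- reads are full table scans
*/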
int ArchiveEngine::doGetTableDefinition(Session&,
                                        const TableIdentifier &identifier,
                                        drizzled::message::Table &table_proto)
{
  struct stat stat_info;
  int error= ENOENT;
  string proto_path;

  proto_path.reserve(FN_REFLEN);
  proto_path.assign(identifier.getPath());

  proto_path.append(ARZ);

  if (stat(proto_path.c_str(),&stat_info))
    return errno;
  else
    error= EEXIST;

  boost::scoped_ptr<azio_stream> proto_stream(new azio_stream);
  char* proto_string;

  if (azopen(proto_stream.get(), proto_path.c_str(), O_RDONLY, AZ_METHOD_BLOCK) == 0)
    return HA_ERR_CRASHED_ON_USAGE;

  proto_string= (char*)malloc(sizeof(char) * proto_stream->frm_length);
  if (proto_string == NULL)
  {
    azclose(proto_stream.get());
    return ENOMEM;
  }

  azread_frm(proto_stream.get(), proto_string);

  if (table_proto.ParseFromArray(proto_string, proto_stream->frm_length) == false)
    error= HA_ERR_CRASHED_ON_USAGE;

  azclose(proto_stream.get());
  free(proto_string);

  /*
    We set the name from what we've asked for as in RENAME TABLE for ARCHIVE
    we do not rewrite the table proto (as it's wedged in the file header).
  */
  table_proto.set_schema(identifier.getSchemaName());
  table_proto.set_name(identifier.getTableName());

  return error;
}
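/*
  Note on the layout used above: the serialized table proto lives in the
  ".arz" file header (written by doCreateTable() with azwrite_frm() and
  read back here with azread_frm()). That is why RENAME TABLE never
  rewrites the proto and the schema/table names are patched afterwards.
*/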
static ArchiveEngine *archive_engine= NULL;

/*
  Initialize the archive Cursor.
*/
static int archive_db_init(drizzled::plugin::Registry &registry)
{
  archive_engine= new ArchiveEngine(engine_name);
  registry.add(archive_engine);

  return 0;
}

/*
  Release the archive Cursor.
*/
static int archive_db_done(drizzled::plugin::Registry &registry)
{
  registry.remove(archive_engine);
  delete archive_engine;

  return 0;
}
ha_archive::ha_archive(drizzled::plugin::StorageEngine &engine_arg,
                       TableShare &table_arg)
  :Cursor(engine_arg, table_arg), delayed_insert(0), bulk_insert(0)
{
  /* Set our original buffer from pre-allocated memory */
  buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info);

  /* The size of the offset value we will use for position() */
  ref_length= sizeof(internal::my_off_t);
  archive_reader_open= false;
}
ArchiveShare::ArchiveShare(const char *name)
{
  memset(&archive_write, 0, sizeof(azio_stream));     /* Archive file we are working with */
  table_name.append(name);
  data_file_name.assign(table_name);
  data_file_name.append(ARZ);
  /*
    We will use this lock for rows.
  */
  pthread_mutex_init(&_mutex,MY_MUTEX_INIT_FAST);
}

ArchiveShare::~ArchiveShare()
{
  thr_lock_delete(&_lock);
  pthread_mutex_destroy(&_mutex);
}
/*
  We need to make sure we don't reset the crashed state.
  If we open a crashed file, we need to close it as crashed unless
  it has been repaired.
  Since we will close the data down after this, we go on and count
  the flush on close.
*/
bool ArchiveShare::prime(uint64_t *auto_increment)
{
  boost::scoped_ptr<azio_stream> archive_tmp(new azio_stream);

  /*
    We read the meta file, but do not mark it dirty. Since we are not
    doing a write we won't mark it dirty (and we won't open it for
    anything but reading... open it for write and we will generate null
    compression writes).
  */
  if (!(azopen(archive_tmp.get(), data_file_name.c_str(), O_RDONLY,
               AZ_METHOD_BLOCK)))
    return false;

  *auto_increment= archive_tmp->auto_increment + 1;
  rows_recorded= (ha_rows)archive_tmp->rows;
  crashed= archive_tmp->dirty;
  if (version < global_version)
  {
    version_rows= rows_recorded;
    version= global_version;
  }
  azclose(archive_tmp.get());

  return true;
}
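/*
  prime() pulls the table metadata (auto_increment high-water mark, row
  count, dirty flag) straight from the ".arz" header; the version check
  just refreshes the cached version_rows snapshot whenever the share
  predates the current global_version.
*/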
ArchiveShare *ha_archive::get_share(const char *table_name, int *rc)
{
  ArchiveShare *share;
  ArchiveEngine *a_engine= static_cast<ArchiveEngine *>(getEngine());

  pthread_mutex_lock(&a_engine->mutex());

  share= a_engine->findOpenTable(table_name);

  if (share == NULL)
  {
    share= new ArchiveShare(table_name);

    if (share == NULL)
    {
      pthread_mutex_unlock(&a_engine->mutex());
      *rc= HA_ERR_OUT_OF_MEM;
      return NULL;
    }

    if (share->prime(&stats.auto_increment_value) == false)
    {
      pthread_mutex_unlock(&a_engine->mutex());
      *rc= HA_ERR_CRASHED_ON_REPAIR;
      delete share;

      return NULL;
    }

    a_engine->addOpenTable(share->table_name, share);
    thr_lock_init(&share->_lock);
  }
  share->use_count++;

  if (share->crashed)
    *rc= HA_ERR_CRASHED_ON_USAGE;
  pthread_mutex_unlock(&a_engine->mutex());

  return share;
}
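/*
  Shares are reference counted: every cursor that opens the table bumps
  use_count while holding the engine mutex, so concurrent opens reuse a
  single ArchiveShare (and a single underlying writer stream) per table.
*/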
/*
  We open the file we will read from.
*/
int ha_archive::doOpen(const TableIdentifier &identifier, int , uint32_t )
{
  int rc= 0;
  share= get_share(identifier.getPath().c_str(), &rc);

  /**
    We either fix it ourselves, or we just take it offline

    @todo Create some documentation in the recovery tools shipped with the engine.
  */
  if (rc == HA_ERR_CRASHED_ON_USAGE)
  {
    free_share();
    return(rc);
  }
  else if (rc == HA_ERR_OUT_OF_MEM)
  {
    return(rc);
  }

  assert(share);

  record_buffer.resize(getTable()->getShare()->getRecordLength() + ARCHIVE_ROW_HEADER_SIZE);

  lock.init(&share->_lock);

  return(0);
}

// Should never be called
int ha_archive::open(const char *, int, uint32_t)
{
  assert(0);
  return -1;
}
int ArchiveEngine::doCreateTable(Session &,
                                 Table& table_arg,
                                 const drizzled::TableIdentifier &identifier,
                                 drizzled::message::Table& proto)
{
  int error= 0;
  boost::scoped_ptr<azio_stream> create_stream(new azio_stream);
  uint64_t auto_increment_value;
  string serialized_proto;

  auto_increment_value= proto.options().auto_increment_value();

  for (uint32_t key= 0; key < table_arg.sizeKeys(); key++)
  {
    KeyInfo *pos= &table_arg.key_info[key];
    KeyPartInfo *key_part=     pos->key_part;
    KeyPartInfo *key_part_end= key_part + pos->key_parts;

    for (; key_part != key_part_end; key_part++)
    {
      Field *field= key_part->field;

      if (!(field->flags & AUTO_INCREMENT_FLAG))
      {
        return -1;
      }
    }
  }

  std::string named_file= identifier.getPath();
  named_file.append(ARZ);

  if (azopen(create_stream.get(), named_file.c_str(), O_CREAT|O_RDWR,
             AZ_METHOD_BLOCK) == 0)
  {
    error= errno;
    unlink(named_file.c_str());

    return(error ? error : -1);
  }

  if (proto.SerializeToString(&serialized_proto) == false)
  {
    unlink(named_file.c_str());

    return(error ? error : -1);
  }

  if (azwrite_frm(create_stream.get(), serialized_proto.c_str(),
                  serialized_proto.length()))
  {
    unlink(named_file.c_str());

    return(error ? error : -1);
  }

  if (proto.options().has_comment())
  {
    int write_length;

    write_length= azwrite_comment(create_stream.get(),
                                  proto.options().comment().c_str(),
                                  proto.options().comment().length());

    if (write_length < 0)
    {
      error= errno;
      unlink(named_file.c_str());

      return(error ? error : -1);
    }
  }

  /*
    Yes you need to do this, because the starting value
    for the autoincrement may not be zero.
  */
  create_stream->auto_increment= auto_increment_value ?
    auto_increment_value - 1 : 0;

  if (azclose(create_stream.get()))
  {
    error= errno;
    unlink(named_file.c_str());

    return(error ? error : -1);
  }

  return(0);
}
uint32_t ha_archive::max_row_length(const unsigned char *)
{
  uint32_t length= (uint32_t)(getTable()->getRecordLength() + getTable()->sizeFields()*2);
  length+= ARCHIVE_ROW_HEADER_SIZE;

  uint32_t *ptr, *end;
  for (ptr= getTable()->getBlobField(), end=ptr + getTable()->sizeBlobFields();
       ptr != end ;
       ptr++)
  {
    length += 2 + ((Field_blob*)getTable()->getField(*ptr))->get_length();
  }

  return length;
}
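/*
  Worst-case sizing sketch: the base record length plus two bytes per
  field covers Field::pack() length prefixes, each blob adds two bytes of
  length plus its current payload, and ARCHIVE_ROW_HEADER_SIZE accounts
  for the fixed per-row header.
*/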
unsigned int ha_archive::pack_row(unsigned char *record)
{
  unsigned char *ptr;

  if (fix_rec_buff(max_row_length(record)))
    return(HA_ERR_OUT_OF_MEM);

  /* Copy null bits */
  memcpy(&record_buffer[0], record, getTable()->getShare()->null_bytes);
  ptr= &record_buffer[0] + getTable()->getShare()->null_bytes;

  for (Field **field=getTable()->getFields() ; *field ; field++)
  {
    if (!((*field)->is_null()))
      ptr= (*field)->pack(ptr, record + (*field)->offset(record));
  }

  return((unsigned int) (ptr - &record_buffer[0]));
}
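/*
  The packed image built above is the null-bit block followed by each
  non-null field in Field::pack() format; the return value is the number
  of bytes of record_buffer actually used for this row.
*/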
/*
  Wondering about start_bulk_insert()? We don't implement it for
  archive since it optimizes for lots of writes. The only save
  for implementing start_bulk_insert() is that we could skip
  setting dirty to true each time.
*/
int ha_archive::doInsertRecord(unsigned char *buf)
{
  int rc;
  unsigned char *read_buf= NULL;
  uint64_t temp_auto;
  unsigned char *record= getTable()->getInsertRecord();

  if (share->crashed)
    return(HA_ERR_CRASHED_ON_USAGE);

  ha_statistic_increment(&system_status_var::ha_write_count);
  pthread_mutex_lock(&share->mutex());

  if (share->archive_write_open == false)
    if (init_archive_writer())
      return(HA_ERR_CRASHED_ON_USAGE);

  if (getTable()->next_number_field && record == getTable()->getInsertRecord())
  {
    update_auto_increment();
    temp_auto= getTable()->next_number_field->val_int();

    /*
      We don't support decrementing auto_increment. They make the performance
      too low. (We only support one key right now, on the auto-increment
      column.)
    */
    if (temp_auto <= share->archive_write.auto_increment &&
        getTable()->getShare()->getKeyInfo(0).flags & HA_NOSAME)
    {
      rc= HA_ERR_FOUND_DUPP_KEY;
/*
  Since we tell
  the optimizer that we have unique indexes, we scan
  to find the matching row when an index read is requested.
*/
int ha_archive::index_read(unsigned char *buf, const unsigned char *key,
                           uint32_t key_len, enum ha_rkey_function find_flag)
{
  int rc;
  rc= index_read_idx(buf, active_index, key, key_len, find_flag);
  return(rc);
}

int ha_archive::index_read_idx(unsigned char *buf, uint32_t index, const unsigned char *key,
                               uint32_t key_len, enum ha_rkey_function)
{
  int rc;
  current_k_offset= getTable()->getShare()->getKeyInfo(0).key_part->offset;
  current_key= key;
  current_key_len= key_len;

  rc= doStartTableScan(true);
/* Reallocate buffer if needed */
bool ha_archive::fix_rec_buff(unsigned int length)
{
  record_buffer.resize(length);

  return false;
}
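/*
  record_buffer is a std::vector here, so resize() handles the allocation
  and an allocation failure surfaces as std::bad_alloc rather than a NULL
  return; that is why this function can always report success.
*/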
int ha_archive::unpack_row(azio_stream *file_to_read, unsigned char *record)
{
  unsigned int read;
  int error;
  const unsigned char *ptr;

  read= azread_row(file_to_read, &error);
  ptr= (const unsigned char *)file_to_read->row_ptr;

  if (error || read == 0)
    return(-1);

  /* Copy null bits */
  memcpy(record, ptr, getTable()->getNullBytes());
  ptr+= getTable()->getNullBytes();
  for (Field **field= getTable()->getFields() ; *field ; field++)
  {
    if (!((*field)->is_null()))
    {
      ptr= (*field)->unpack(record + (*field)->offset(getTable()->getInsertRecord()), ptr);
    }
  }
  return(0);
}
int ha_archive::rnd_next(unsigned char *buf)
{
  int rc;

  if (share->crashed)
    return(HA_ERR_CRASHED_ON_USAGE);

  if (!scan_rows)
    return(HA_ERR_END_OF_FILE);
  scan_rows--;

  ha_statistic_increment(&system_status_var::ha_read_rnd_next_count);
  current_position= aztell(&archive);
  rc= get_row(&archive, buf);

  getTable()->status=rc ? STATUS_NOT_FOUND: 0;

  return(rc);
}
/*
  Thanks to the table bool is_ordered this will be called after
  each call to ha_archive::rnd_next() if an ordering of the rows is
  needed.
*/
void ha_archive::position(const unsigned char *)
{
  internal::my_store_ptr(ref, ref_length, current_position);
}
int ha_archive::rnd_pos(unsigned char * buf, unsigned char *pos)
{
  ha_statistic_increment(&system_status_var::ha_read_rnd_next_count);
  current_position= (internal::my_off_t)internal::my_get_ptr(pos, ref_length);
  if (azseek(&archive, (size_t)current_position, SEEK_SET) == (size_t)(-1L))
    return(HA_ERR_CRASHED_ON_USAGE);
  return(get_row(&archive, buf));
}
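/*
  position()/rnd_pos() round trip: position() stores the aztell() byte
  offset of the current row into ref, and rnd_pos() azseek()s back to that
  offset before re-reading, so ARCHIVE "row ids" are simply offsets into
  the compressed stream.
*/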
  azread_frm(&archive, proto_string);

  /* Lets create a file to contain the new data */
  std::string writer_filename= share->table_name;
  writer_filename.append(ARN);

  if (!(azopen(writer.get(), writer_filename.c_str(), O_CREAT|O_RDWR, AZ_METHOD_BLOCK)))
  {
    free(proto_string);
    return(HA_ERR_CRASHED_ON_USAGE);
  }

  azwrite_frm(writer.get(), proto_string, archive.frm_length);

  /*
    An extended rebuild is a lot more effort. We open up each row and re-record it.
  */
  uint64_t rows_restored;
  rows_restored= archive.rows;

  for (uint64_t x= 0; x < rows_restored ; x++)
  {
    rc= get_row(&archive, getTable()->getInsertRecord());

    if (rc != 0)
      break;

    real_write_row(getTable()->getInsertRecord(), writer.get());

    /*
      Long term it should be possible to optimize this so that
      it is not called on each row.
    */
    if (getTable()->found_next_number_field)
    {
      Field *field= getTable()->found_next_number_field;

      /* Since we will need to use field to translate, we need to flip its read bit */
      field->setReadSet();

      uint64_t auto_value=
        (uint64_t) field->val_int_internal(getTable()->getInsertRecord() +
                                           field->offset(getTable()->getInsertRecord()));

      if (share->archive_write.auto_increment < auto_value)
        stats.auto_increment_value=
          (share->archive_write.auto_increment= auto_value) + 1;
    }
  }

  share->rows_recorded= (ha_rows)writer->rows;

  if (rc && rc != HA_ERR_END_OF_FILE)
  struct stat file_stat;  // Stat information for the data file

  stat(share->data_file_name.c_str(), &file_stat);

  stats.mean_rec_length= getTable()->getRecordLength()+ buffer.alloced_length();
  stats.data_file_length= file_stat.st_size;
  stats.create_time= file_stat.st_ctime;
  stats.update_time= file_stat.st_mtime;
/*
  We just return state if asked.
*/
bool ha_archive::is_crashed() const
{
  return(share->crashed);
}
/*
  Simple scan of the tables to make sure everything is ok.
*/
int ha_archive::check(Session* session)
{
  int rc= 0;
  const char *old_proc_info;

  old_proc_info= get_session_proc_info(session);
  set_session_proc_info(session, "Checking table");
  /* Flush any waiting data */
  pthread_mutex_lock(&share->mutex());
  azflush(&(share->archive_write), Z_SYNC_FLUSH);
  pthread_mutex_unlock(&share->mutex());

  /*
    Now we will rewind the archive file so that we are positioned at the
    start of the file.
  */
int ArchiveEngine::doRenameTable(Session&, const TableIdentifier &from, const TableIdentifier &to)
{
  int error= 0;

  for (const char **ext= bas_ext(); *ext ; ext++)
  {
    if (rename_file_ext(from.getPath().c_str(), to.getPath().c_str(), *ext))
    {
      if ((error=errno) != ENOENT)
        break;
      error= 0;
    }
  }

  return error;
}
bool ArchiveEngine::doDoesTableExist(Session&,
                                     const TableIdentifier &identifier)
{
  string proto_path(identifier.getPath());
  proto_path.append(ARZ);

  if (access(proto_path.c_str(), F_OK))
  {
    return false;
  }

  return true;
}
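/*
  Existence is purely a filesystem check: a table exists exactly when its
  ".arz" file does, mirroring doDropTable(), which unlinks that one file.
*/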
void ArchiveEngine::doGetTableIdentifiers(drizzled::CachedDirectory &directory,
                                          const drizzled::SchemaIdentifier &schema_identifier,
                                          drizzled::TableIdentifier::vector &set_of_identifiers)
{
  drizzled::CachedDirectory::Entries entries= directory.getEntries();

  for (drizzled::CachedDirectory::Entries::iterator entry_iter= entries.begin();
       entry_iter != entries.end(); ++entry_iter)
  {
    drizzled::CachedDirectory::Entry *entry= *entry_iter;
    const string *filename= &entry->filename;

    assert(filename->size());

    const char *ext= strchr(filename->c_str(), '.');

    if (ext == NULL || my_strcasecmp(system_charset_info, ext, ARZ) ||
        (filename->compare(0, strlen(TMP_FILE_PREFIX), TMP_FILE_PREFIX) == 0))
    { }
    else
    {
      char uname[NAME_LEN + 1];
      uint32_t file_name_len;

      file_name_len= TableIdentifier::filename_to_tablename(filename->c_str(), uname, sizeof(uname));
      // TODO: Remove need for memory copy here
      uname[file_name_len - sizeof(ARZ) + 1]= '\0'; // Subtract ending, place NULL
      set_of_identifiers.push_back(TableIdentifier(schema_identifier, uname));
    }
  }
}
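/*
  Directory scan sketch: anything that is not a non-temporary ".arz" file
  is skipped; surviving names are decoded with filename_to_tablename() and
  the extension is clipped before the identifier is published.
*/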
static DRIZZLE_SYSVAR_BOOL(aio, archive_use_aio,
                           PLUGIN_VAR_NOCMDOPT,
                           "Whether or not to use asynchronous IO.",
                           NULL, NULL, true);

static struct st_mysql_sys_var* archive_system_variables[]= {
  DRIZZLE_SYSVAR(aio),
  NULL
};

drizzle_declare_plugin
{
  "ARCHIVE",
  "3.5",
  "Brian Aker, MySQL AB",
  "Archive storage engine",
  PLUGIN_LICENSE_GPL,
  archive_db_init, /* Plugin Init */
  archive_db_done, /* Plugin Deinit */
  NULL,                       /* status variables */
  archive_system_variables,   /* system variables */
  NULL                        /* config options */
}
drizzle_declare_plugin_end;