~drizzle-trunk/drizzle/development

Viewing changes to storage/archive/ha_archive.cc

  • Committer: Monty Taylor
  • Date: 2008-07-05 18:10:22 UTC
  • mto: This revision was merged to the branch mainline in revision 63.
  • Revision ID: monty@inaugust.com-20080705181022-nbrtrf77j68r8n5e
Added include for prototype.

@@ -201,7 +201,7 @@
     FALSE       OK
 */
 
-int archive_db_done(void *p __attribute__((__unused__)))
+int archive_db_done(void *p)
 {
   hash_free(&archive_open_tables);
   VOID(pthread_mutex_destroy(&archive_mutex));
@@ -221,11 +221,9 @@
   archive_reader_open= FALSE;
 }
 
-int archive_discover(handlerton *hton __attribute__((__unused__)),
-                     THD* thd __attribute__((__unused__)),
-                     const char *db,
+int archive_discover(handlerton *hton, THD* thd, const char *db,
                      const char *name,
-                     uchar **frmblob,
+                     uchar **frmblob,
                      size_t *frmlen)
 {
   DBUG_ENTER("archive_discover");
@@ -486,9 +484,7 @@
   Init out lock.
   We open the file we will read from.
 */
-int ha_archive::open(const char *name,
-                     int mode __attribute__((__unused__)),
-                     uint open_options)
+int ha_archive::open(const char *name, int mode, uint open_options)
 {
   int rc= 0;
   DBUG_ENTER("ha_archive::open");
@@ -735,7 +731,7 @@
   the bytes required for the length in the header.
 */
 
-uint32 ha_archive::max_row_length(const uchar *buf __attribute__((__unused__)))
+uint32 ha_archive::max_row_length(const uchar *buf)
 {
   uint32 length= (uint32)(table->s->reclength + table->s->fields*2);
   length+= ARCHIVE_ROW_HEADER_SIZE;
@@ -894,18 +890,17 @@
 }
 
 
-void ha_archive::get_auto_increment(uint64_t offset __attribute__((__unused__)),
-                                    uint64_t increment __attribute__((__unused__)),
-                                    uint64_t nb_desired_values __attribute__((__unused__)),
-                                    uint64_t *first_value __attribute__((__unused__)),
-                                    uint64_t *nb_reserved_values __attribute__((__unused__)))
+void ha_archive::get_auto_increment(uint64_t offset, uint64_t increment,
+                                    uint64_t nb_desired_values,
+                                    uint64_t *first_value,
+                                    uint64_t *nb_reserved_values)
 {
   *nb_reserved_values= ULONGLONG_MAX;
   *first_value= share->archive_write.auto_increment + 1;
 }
 
 /* Initialized at each key walk (called multiple times unlike rnd_init()) */
-int ha_archive::index_init(uint keynr, bool sorted __attribute__((__unused__)))
+int ha_archive::index_init(uint keynr, bool sorted)
 {
   DBUG_ENTER("ha_archive::index_init");
   active_index= keynr;
@@ -928,8 +923,7 @@
 
 
 int ha_archive::index_read_idx(uchar *buf, uint index, const uchar *key,
-                               uint key_len,
-                               enum ha_rkey_function find_flag __attribute__((__unused__)))
+                                 uint key_len, enum ha_rkey_function find_flag)
 {
   int rc;
   bool found= 0;
@@ -1128,7 +1122,7 @@
   needed.
 */
 
-void ha_archive::position(const uchar *record __attribute__((__unused__)))
+void ha_archive::position(const uchar *record)
 {
   DBUG_ENTER("ha_archive::position");
   my_store_ptr(ref, ref_length, current_position);
@@ -1175,8 +1169,7 @@
   The table can become fragmented if data was inserted, read, and then
   inserted again. What we do is open up the file and recompress it completely.
 */
-int ha_archive::optimize(THD* thd __attribute__((__unused__)),
-                         HA_CHECK_OPT* check_opt __attribute__((__unused__)))
+int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
 {
   DBUG_ENTER("ha_archive::optimize");
   int rc= 0;
@@ -1467,8 +1460,7 @@
   Simple scan of the tables to make sure everything is ok.
 */
 
-int ha_archive::check(THD* thd,
-                      HA_CHECK_OPT* check_opt __attribute__((__unused__)))
+int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
 {
   int rc= 0;
   const char *old_proc_info;
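
Every hunk above drops GCC's __attribute__((__unused__)) marker from one or more parameters. That attribute tells the compiler a parameter exists only to satisfy a fixed handler signature and is never read in the body, which keeps -Wunused-parameter quiet. A minimal sketch of the pattern on the removed side, using a hypothetical callback that is not part of ha_archive.cc:

  /* demo_hook must keep this signature to match its caller; the attribute
     stops GCC from warning that ctx is never read inside the body. */
  static int demo_hook(void *ctx __attribute__((__unused__)))
  {
    return 0;
  }

On the new side the parameters are left unannotated, so the same functions would presumably warn under -Wextra unless the warning is suppressed some other way.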