~drizzle-trunk/drizzle/development

Viewing changes to storage/archive/ha_archive.cc

Merged build changes from Antony.

@@ -17,13 +17,10 @@
 #pragma implementation        // gcc: Class implementation
 #endif
 
-#include "mysql_priv.h"
-#include <myisam.h>
+#include <drizzled/mysql_priv.h>
+#include <storage/myisam/myisam.h>
 
 #include "ha_archive.h"
-#include <my_dir.h>
-
-#include <mysql/plugin.h>
 
 /*
   First, if you want to understand storage engines you should look at 
@@ -200,7 +197,7 @@
     false       OK
 */
 
-int archive_db_done(void *p __attribute__((__unused__)))
+int archive_db_done(void *p __attribute__((unused)))
 {
   hash_free(&archive_open_tables);
   VOID(pthread_mutex_destroy(&archive_mutex));
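Most hunks in this change only touch the spelling of the GCC attribute, moving from the reserved-identifier form __attribute__((__unused__)) to the plain keyword form __attribute__((unused)). For reference, a minimal standalone sketch of what the attribute does; example_callback is an illustrative name, not code from this file:

/* Marking a parameter as intentionally unused keeps GCC's
   -Wunused-parameter warning quiet without changing behaviour. */
static int example_callback(void *p __attribute__((unused)))
{
  /* p is required by the callback signature but never read here. */
  return 0;
}

int main(void)
{
  return example_callback(0);
}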
@@ -220,8 +217,8 @@
   archive_reader_open= false;
 }
 
-int archive_discover(handlerton *hton __attribute__((__unused__)),
-                     THD* thd __attribute__((__unused__)),
+int archive_discover(handlerton *hton __attribute__((unused)),
+                     THD* thd __attribute__((unused)),
                      const char *db,
                      const char *name,
                      uchar **frmblob,
@@ -467,7 +464,7 @@
   We open the file we will read from.
 */
 int ha_archive::open(const char *name,
-                     int mode __attribute__((__unused__)),
+                     int mode __attribute__((unused)),
                      uint open_options)
 {
   int rc= 0;
@@ -694,9 +691,9 @@
   the bytes required for the length in the header.
 */
 
-uint32 ha_archive::max_row_length(const uchar *buf __attribute__((__unused__)))
+uint32_t ha_archive::max_row_length(const uchar *buf __attribute__((unused)))
 {
-  uint32 length= (uint32)(table->s->reclength + table->s->fields*2);
+  uint32_t length= (uint32_t)(table->s->reclength + table->s->fields*2);
   length+= ARCHIVE_ROW_HEADER_SIZE;
 
   uint *ptr, *end;
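The other recurring substitution here is the move from the server's internal uint32 typedef to the standard uint32_t. A small standalone sketch of the portable spelling; example_row_length is illustrative only and mirrors the arithmetic visible in max_row_length() above:

#include <stdint.h>   /* C99/POSIX fixed-width types: uint32_t, uint64_t, ... */

/* uint32_t is guaranteed to be exactly 32 bits on any conforming platform,
   so no project-local uint32 typedef is needed. */
typedef char assert_uint32_is_4_bytes[sizeof(uint32_t) == 4 ? 1 : -1];

/* Fixed record length plus two bytes of per-field overhead
   (the row-header constant is omitted in this sketch). */
uint32_t example_row_length(uint32_t reclength, uint32_t fields)
{
  return reclength + fields * 2;
}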
@@ -845,18 +842,18 @@
 }
 
 
-void ha_archive::get_auto_increment(uint64_t offset __attribute__((__unused__)),
-                                    uint64_t increment __attribute__((__unused__)),
-                                    uint64_t nb_desired_values __attribute__((__unused__)),
-                                    uint64_t *first_value __attribute__((__unused__)),
-                                    uint64_t *nb_reserved_values __attribute__((__unused__)))
+void ha_archive::get_auto_increment(uint64_t offset __attribute__((unused)),
+                                    uint64_t increment __attribute__((unused)),
+                                    uint64_t nb_desired_values __attribute__((unused)),
+                                    uint64_t *first_value __attribute__((unused)),
+                                    uint64_t *nb_reserved_values __attribute__((unused)))
 {
   *nb_reserved_values= UINT64_MAX;
   *first_value= share->archive_write.auto_increment + 1;
 }
 
 /* Initialized at each key walk (called multiple times unlike rnd_init()) */
-int ha_archive::index_init(uint keynr, bool sorted __attribute__((__unused__)))
+int ha_archive::index_init(uint keynr, bool sorted __attribute__((unused)))
 {
   active_index= keynr;
   return(0);
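The get_auto_increment() body above is just two assignments; here is the same logic restated as a standalone sketch with the out-parameters annotated. The comments are an interpretation of the handler contract, and ArchiveShareLike is a hypothetical stand-in for the engine's shared writer state:

#include <stdint.h>

struct ArchiveShareLike { uint64_t auto_increment; };

void example_get_auto_increment(const ArchiveShareLike &share,
                                uint64_t *first_value,
                                uint64_t *nb_reserved_values)
{
  /* No bounded block of values is pre-allocated, so report an
     effectively unlimited reservation to the caller. */
  *nb_reserved_values= UINT64_MAX;

  /* Hand out one past the highest auto_increment value already written. */
  *first_value= share.auto_increment + 1;
}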
@@ -878,7 +875,7 @@
 
 int ha_archive::index_read_idx(uchar *buf, uint index, const uchar *key,
                                uint key_len,
-                               enum ha_rkey_function find_flag __attribute__((__unused__)))
+                               enum ha_rkey_function find_flag __attribute__((unused)))
 {
   int rc;
   bool found= 0;
@@ -1054,7 +1051,7 @@
   needed.
 */
 
-void ha_archive::position(const uchar *record __attribute__((__unused__)))
+void ha_archive::position(const uchar *record __attribute__((unused)))
 {
   my_store_ptr(ref, ref_length, current_position);
   return;
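position() above records where the current row starts so a later rnd_pos() call can seek back to it; my_store_ptr packs that offset into the handler's ref buffer. A standalone illustration of the pack/unpack idea using plain memcpy (the real my_store_ptr/my_get_ptr helpers are not reproduced here, and the function names are hypothetical):

#include <string.h>   /* memcpy */
#include <stdint.h>   /* uint64_t */

/* Store a row's file offset into an opaque "ref" buffer... */
void example_store_position(unsigned char *ref, uint64_t row_offset)
{
  memcpy(ref, &row_offset, sizeof(row_offset));
}

/* ...and recover it later when the server asks to re-read that row. */
uint64_t example_restore_position(const unsigned char *ref)
{
  uint64_t row_offset;
  memcpy(&row_offset, ref, sizeof(row_offset));
  return row_offset;
}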
@@ -1098,8 +1095,8 @@
   The table can become fragmented if data was inserted, read, and then
   inserted again. What we do is open up the file and recompress it completely. 
 */
-int ha_archive::optimize(THD* thd __attribute__((__unused__)),
-                         HA_CHECK_OPT* check_opt __attribute__((__unused__)))
+int ha_archive::optimize(THD* thd __attribute__((unused)),
+                         HA_CHECK_OPT* check_opt __attribute__((unused)))
 {
   int rc= 0;
   azio_stream writer;
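The comment above describes OPTIMIZE for this engine as a full rewrite: stream the existing compressed data into a fresh file, then swap files. A deliberately simplified sketch of that rewrite-and-swap shape; example_optimize and the raw byte copy are stand-ins only, since the real function re-reads and re-compresses rows through azio_stream rather than stdio:

#include <stdio.h>

bool example_optimize(const char *data_file, const char *tmp_file)
{
  FILE *in= fopen(data_file, "rb");
  FILE *out= fopen(tmp_file, "wb");
  if (!in || !out)
  {
    if (in) fclose(in);
    if (out) fclose(out);
    return false;
  }

  char buf[64 * 1024];
  size_t len;
  while ((len= fread(buf, 1, sizeof(buf), in)) > 0)
    fwrite(buf, 1, len, out);   /* re-emit each chunk into the new file */

  fclose(in);
  fclose(out);

  /* Swap the freshly written file into place of the fragmented one. */
  return rename(tmp_file, data_file) == 0;
}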
@@ -1371,7 +1368,7 @@
 */
 
 int ha_archive::check(THD* thd,
-                      HA_CHECK_OPT* check_opt __attribute__((__unused__)))
+                      HA_CHECK_OPT* check_opt __attribute__((unused)))
 {
   int rc= 0;
   const char *old_proc_info;