~drizzle-trunk/drizzle/development

« back to all changes in this revision

Viewing changes to storage/archive/ha_archive.cc

  • Committer: Monty Taylor
  • Date: 2008-10-22 01:52:54 UTC
  • Revision ID: monty@inaugust.com-20081022015254-65qfk9f2v0b8jlk3
Moved drizzle_com to drizzled/drizzle_common. Started splitting it up.

Show diffs side-by-side

added added

removed removed

Lines of Context:
13
13
  along with this program; if not, write to the Free Software
14
14
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
15
15
 
16
 
#ifdef USE_PRAGMA_IMPLEMENTATION
17
 
#pragma implementation        // gcc: Class implementation
18
 
#endif
19
16
 
20
17
#include <drizzled/common_includes.h>
21
18
#include <storage/myisam/myisam.h>
98
95
/* The file extension */
99
96
#define ARZ ".ARZ"               // The data file
100
97
#define ARN ".ARN"               // Files used during an optimize call
101
 
#define ARM ".ARM"               // Meta file (deprecated)
102
98
 
103
 
/*
104
 
  unsigned char + unsigned char
105
 
*/
106
 
#define DATA_BUFFER_SIZE 2       // Size of the data used in the data file
107
 
#define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption
108
99
 
109
100
/* Static declarations for handlerton */
110
101
static handler *archive_create_handler(handlerton *hton, 
111
102
                                       TABLE_SHARE *table, 
112
103
                                       MEM_ROOT *mem_root);
113
 
int archive_discover(handlerton *hton, THD* thd, const char *db, 
 
104
int archive_discover(handlerton *hton, Session* session, const char *db, 
114
105
                     const char *name,
115
106
                     unsigned char **frmblob, 
116
107
                     size_t *frmlen);
217
208
}
218
209
 
219
210
int archive_discover(handlerton *hton __attribute__((unused)),
220
 
                     THD* thd __attribute__((unused)),
 
211
                     Session* session __attribute__((unused)),
221
212
                     const char *db,
222
213
                     const char *name,
223
214
                     unsigned char **frmblob,
233
224
  if (stat(az_file, &file_stat))
234
225
    goto err;
235
226
 
236
 
  if (!(azopen(&frm_stream, az_file, O_RDONLY|O_BINARY, AZ_METHOD_BLOCK)))
 
227
  if (!(azopen(&frm_stream, az_file, O_RDONLY, AZ_METHOD_BLOCK)))
237
228
  {
238
229
    if (errno == EROFS || errno == EACCES)
239
230
      return(my_errno= errno);
321
312
      anything but reading... open it for write and we will generate null
322
313
      compression writes).
323
314
    */
324
 
    if (!(azopen(&archive_tmp, share->data_file_name, O_RDONLY|O_BINARY,
 
315
    if (!(azopen(&archive_tmp, share->data_file_name, O_RDONLY,
325
316
                 AZ_METHOD_BLOCK)))
326
317
    {
327
318
      pthread_mutex_destroy(&share->mutex);
393
384
    that is shared among all open tables.
394
385
  */
395
386
  if (!(azopen(&(share->archive_write), share->data_file_name, 
396
 
               O_RDWR|O_BINARY, AZ_METHOD_BLOCK)))
 
387
               O_RDWR, AZ_METHOD_BLOCK)))
397
388
  {
398
389
    share->crashed= true;
399
390
    return(1);
429
420
    default:
430
421
      method= AZ_METHOD_BLOCK;
431
422
    }
432
 
    if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY, 
 
423
    if (!(azopen(&archive, share->data_file_name, O_RDONLY, 
433
424
                 method)))
434
425
    {
435
426
      share->crashed= true;
603
594
  if (!stat(name_buff, &file_stat))
604
595
  {
605
596
    my_errno= 0;
606
 
    if (!(azopen(&create_stream, name_buff, O_CREAT|O_RDWR|O_BINARY,
 
597
    if (!(azopen(&create_stream, name_buff, O_CREAT|O_RDWR,
607
598
                 AZ_METHOD_BLOCK)))
608
599
    {
609
600
      error= errno;
1078
1069
  rewriting the meta file. Currently it does this by calling optimize with
1079
1070
  the extended flag.
1080
1071
*/
1081
 
int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt)
 
1072
int ha_archive::repair(Session* session, HA_CHECK_OPT* check_opt)
1082
1073
{
1083
1074
  check_opt->flags= T_EXTEND;
1084
 
  int rc= optimize(thd, check_opt);
 
1075
  int rc= optimize(session, check_opt);
1085
1076
 
1086
1077
  if (rc)
1087
1078
    return(HA_ERR_CRASHED_ON_REPAIR);
1094
1085
  The table can become fragmented if data was inserted, read, and then
1095
1086
  inserted again. What we do is open up the file and recompress it completely. 
1096
1087
*/
1097
 
int ha_archive::optimize(THD* thd __attribute__((unused)),
 
1088
int ha_archive::optimize(Session* session __attribute__((unused)),
1098
1089
                         HA_CHECK_OPT* check_opt __attribute__((unused)))
1099
1090
{
1100
1091
  int rc= 0;
1114
1105
  fn_format(writer_filename, share->table_name, "", ARN, 
1115
1106
            MY_REPLACE_EXT | MY_UNPACK_FILENAME);
1116
1107
 
1117
 
  if (!(azopen(&writer, writer_filename, O_CREAT|O_RDWR|O_BINARY, AZ_METHOD_BLOCK)))
 
1108
  if (!(azopen(&writer, writer_filename, O_CREAT|O_RDWR, AZ_METHOD_BLOCK)))
1118
1109
    return(HA_ERR_CRASHED_ON_USAGE); 
1119
1110
 
1120
1111
  /* 
1198
1189
/* 
1199
1190
  Below is an example of how to setup row level locking.
1200
1191
*/
1201
 
THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
 
1192
THR_LOCK_DATA **ha_archive::store_lock(Session *session,
1202
1193
                                       THR_LOCK_DATA **to,
1203
1194
                                       enum thr_lock_type lock_type)
1204
1195
{
1217
1208
    */
1218
1209
 
1219
1210
    if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
1220
 
         lock_type <= TL_WRITE) && !thd_in_lock_tables(thd)
1221
 
        && !thd_tablespace_op(thd))
 
1211
         lock_type <= TL_WRITE) && !session_in_lock_tables(session)
 
1212
        && !session_tablespace_op(session))
1222
1213
      lock_type = TL_WRITE_ALLOW_WRITE;
1223
1214
 
1224
1215
    /* 
1229
1220
      concurrent inserts to t2. 
1230
1221
    */
1231
1222
 
1232
 
    if (lock_type == TL_READ_NO_INSERT && !thd_in_lock_tables(thd)) 
 
1223
    if (lock_type == TL_READ_NO_INSERT && !session_in_lock_tables(session)) 
1233
1224
      lock_type = TL_READ;
1234
1225
 
1235
1226
    lock.type=lock_type;
1364
1355
  Simple scan of the tables to make sure everything is ok.
1365
1356
*/
1366
1357
 
1367
 
int ha_archive::check(THD* thd,
 
1358
int ha_archive::check(Session* session,
1368
1359
                      HA_CHECK_OPT* check_opt __attribute__((unused)))
1369
1360
{
1370
1361
  int rc= 0;
1371
1362
  const char *old_proc_info;
1372
1363
  uint64_t x;
1373
1364
 
1374
 
  old_proc_info= thd_proc_info(thd, "Checking table");
 
1365
  old_proc_info= get_session_proc_info(session);
 
1366
  set_session_proc_info(session, "Checking table");
1375
1367
  /* Flush any waiting data */
1376
1368
  pthread_mutex_lock(&share->mutex);
1377
1369
  azflush(&(share->archive_write), Z_SYNC_FLUSH);
1392
1384
      break;
1393
1385
  }
1394
1386
 
1395
 
  thd_proc_info(thd, old_proc_info);
 
1387
  set_session_proc_info(session, old_proc_info);
1396
1388
 
1397
1389
  if ((rc && rc != HA_ERR_END_OF_FILE))  
1398
1390
  {
1408
1400
/*
1409
1401
  Check and repair the table if needed.
1410
1402
*/
1411
 
bool ha_archive::check_and_repair(THD *thd) 
 
1403
bool ha_archive::check_and_repair(Session *session) 
1412
1404
{
1413
1405
  HA_CHECK_OPT check_opt;
1414
1406
 
1415
1407
  check_opt.init();
1416
1408
 
1417
 
  return(repair(thd, &check_opt));
 
1409
  return(repair(session, &check_opt));
1418
1410
}
1419
1411
 
1420
1412
archive_record_buffer *ha_archive::create_record_buffer(unsigned int length)