~drizzle-trunk/drizzle/development

1 by brian
clean slate
1
/* Copyright (C) 2003 MySQL AB
2
3
  This program is free software; you can redistribute it and/or modify
4
  it under the terms of the GNU General Public License as published by
5
  the Free Software Foundation; version 2 of the License.
6
7
  This program is distributed in the hope that it will be useful,
8
  but WITHOUT ANY WARRANTY; without even the implied warranty of
9
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10
  GNU General Public License for more details.
11
12
  You should have received a copy of the GNU General Public License
13
  along with this program; if not, write to the Free Software
14
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
15
16
#ifdef USE_PRAGMA_IMPLEMENTATION
17
#pragma implementation        // gcc: Class implementation
18
#endif
19
243.1.14 by Jay Pipes
* Ensured all drizzled/field/x.cc files to include mysql_priv.h
20
#include <drizzled/common_includes.h>
212.4.2 by Monty Taylor
Fixed the includes in places to make the myisam header file move work.
21
#include <storage/myisam/myisam.h>
1 by brian
clean slate
22
23
#include "ha_archive.h"
24
25
/*
26
  First, if you want to understand storage engines you should look at 
27
  ha_example.cc and ha_example.h. 
28
29
  This example was written as a test case for a customer who needed
30
  a storage engine without indexes that could compress data very well.
31
  So, welcome to a completely compressed storage engine. This storage
32
  engine only does inserts. No replace, deletes, or updates. All reads are 
33
  complete table scans. Compression is done through a combination of packing
34
  and making use of the zlib library
35
  
36
  We keep a file pointer open for each instance of ha_archive for each read
37
  but for writes we keep one open file handle just for that. We flush it
38
  only if we have a read occur. azip handles compressing lots of records
39
  at once much better then doing lots of little records between writes.
40
  It is possible to not lock on writes but this would then mean we couldn't
41
  handle bulk inserts as well (that is if someone was trying to read at
42
  the same time since we would want to flush).
43
44
  A "meta" file is kept alongside the data file. This file serves two purpose.
45
  The first purpose is to track the number of rows in the table. The second 
46
  purpose is to determine if the table was closed properly or not. When the 
47
  meta file is first opened it is marked as dirty. It is opened when the table 
48
  itself is opened for writing. When the table is closed the new count for rows 
49
  is written to the meta file and the file is marked as clean. If the meta file 
50
  is opened and it is marked as dirty, it is assumed that a crash occured. At 
51
  this point an error occurs and the user is told to rebuild the file.
52
  A rebuild scans the rows and rewrites the meta file. If corruption is found
53
  in the data file then the meta file is not repaired.
54
55
  At some point a recovery method for such a drastic case needs to be divised.
56
57
  Locks are row level, and you will get a consistant read. 
58
59
  For performance as far as table scans go it is quite fast. I don't have
60
  good numbers but locally it has out performed both Innodb and MyISAM. For
61
  Innodb the question will be if the table can be fit into the buffer
62
  pool. For MyISAM its a question of how much the file system caches the
63
  MyISAM file. With enough free memory MyISAM is faster. Its only when the OS
64
  doesn't have enough memory to cache entire table that archive turns out 
65
  to be any faster. 
66
67
  Examples between MyISAM (packed) and Archive.
68
69
  Table with 76695844 identical rows:
70
  29680807 a_archive.ARZ
71
  920350317 a.MYD
72
73
74
  Table with 8991478 rows (all of Slashdot's comments):
75
  1922964506 comment_archive.ARZ
76
  2944970297 comment_text.MYD
77
78
79
  TODO:
80
   Allow users to set compression level.
81
   Allow adjustable block size.
82
   Implement versioning, should be easy.
83
   Allow for errors, find a way to mark bad rows.
84
   Add optional feature so that rows can be flushed at interval (which will cause less
85
     compression but may speed up ordered searches).
86
   Checkpoint the meta file to allow for faster rebuilds.
87
   Option to allow for dirty reads, this would lower the sync calls, which would make
88
     inserts a lot faster, but would mean highly arbitrary reads.
89
90
    -Brian
91
*/
92
93
/* Variables for archive share methods */
94
pthread_mutex_t archive_mutex;
95
static HASH archive_open_tables;
96
static unsigned int global_version;
97
98
/* The file extension */
99
#define ARZ ".ARZ"               // The data file
100
#define ARN ".ARN"               // Files used during an optimize call
101
#define ARM ".ARM"               // Meta file (deprecated)
102
103
/*
104
  uchar + uchar
105
*/
106
#define DATA_BUFFER_SIZE 2       // Size of the data used in the data file
107
#define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption
108
109
/* Static declarations for handerton */
110
static handler *archive_create_handler(handlerton *hton, 
111
                                       TABLE_SHARE *table, 
112
                                       MEM_ROOT *mem_root);
113
int archive_discover(handlerton *hton, THD* thd, const char *db, 
114
                     const char *name,
115
                     uchar **frmblob, 
116
                     size_t *frmlen);
117
278 by Brian Aker
Fixed my_bool issue in options.
118
static bool archive_use_aio= false;
1 by brian
clean slate
119
120
/*
121
  Number of rows that will force a bulk insert.
122
*/
123
#define ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT 2
124
125
/*
126
  Size of header used for row
127
*/
128
#define ARCHIVE_ROW_HEADER_SIZE 4
129
130
static handler *archive_create_handler(handlerton *hton,
131
                                       TABLE_SHARE *table, 
132
                                       MEM_ROOT *mem_root)
133
{
134
  return new (mem_root) ha_archive(hton, table);
135
}
136
137
/*
138
  Used for hash table that tracks open tables.
139
*/
140
static uchar* archive_get_key(ARCHIVE_SHARE *share, size_t *length,
282 by Brian Aker
Modified blackhole and archive to remove my_bool.
141
                             bool not_used __attribute__((unused)))
1 by brian
clean slate
142
{
143
  *length=share->table_name_length;
144
  return (uchar*) share->table_name;
145
}
146
147
148
/*
149
  Initialize the archive handler.
150
151
  SYNOPSIS
152
    archive_db_init()
153
    void *
154
155
  RETURN
163 by Brian Aker
Merge Monty's code.
156
    false       OK
157
    true        Error
1 by brian
clean slate
158
*/
159
160
int archive_db_init(void *p)
161
{
162
  handlerton *archive_hton;
163
164
  archive_hton= (handlerton *)p;
165
  archive_hton->state= SHOW_OPTION_YES;
166
  archive_hton->create= archive_create_handler;
167
  archive_hton->flags= HTON_NO_FLAGS;
168
  archive_hton->discover= archive_discover;
169
170
  /* When the engine starts up set the first version */
171
  global_version= 1;
172
173
  if (pthread_mutex_init(&archive_mutex, MY_MUTEX_INIT_FAST))
174
    goto error;
175
  if (hash_init(&archive_open_tables, system_charset_info, 32, 0, 0,
176
                (hash_get_key) archive_get_key, 0, 0))
177
  {
178
    VOID(pthread_mutex_destroy(&archive_mutex));
179
  }
180
  else
181
  {
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
182
    return(false);
1 by brian
clean slate
183
  }
184
error:
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
185
  return(true);
1 by brian
clean slate
186
}
187
188
/*
189
  Release the archive handler.
190
191
  SYNOPSIS
192
    archive_db_done()
193
    void
194
195
  RETURN
163 by Brian Aker
Merge Monty's code.
196
    false       OK
1 by brian
clean slate
197
*/
198
212.1.3 by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)).
199
int archive_db_done(void *p __attribute__((unused)))
1 by brian
clean slate
200
{
201
  hash_free(&archive_open_tables);
202
  VOID(pthread_mutex_destroy(&archive_mutex));
203
204
  return 0;
205
}
206
207
208
ha_archive::ha_archive(handlerton *hton, TABLE_SHARE *table_arg)
209
  :handler(hton, table_arg), delayed_insert(0), bulk_insert(0)
210
{
211
  /* Set our original buffer from pre-allocated memory */
212
  buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info);
213
214
  /* The size of the offset value we will use for position() */
215
  ref_length= sizeof(my_off_t);
163 by Brian Aker
Merge Monty's code.
216
  archive_reader_open= false;
1 by brian
clean slate
217
}
218
212.1.3 by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)).
219
int archive_discover(handlerton *hton __attribute__((unused)),
220
                     THD* thd __attribute__((unused)),
53.2.32 by Monty Taylor
First large swath at getting handler stuff clean.
221
                     const char *db,
1 by brian
clean slate
222
                     const char *name,
53.2.32 by Monty Taylor
First large swath at getting handler stuff clean.
223
                     uchar **frmblob,
1 by brian
clean slate
224
                     size_t *frmlen)
225
{
226
  azio_stream frm_stream;
227
  char az_file[FN_REFLEN];
228
  char *frm_ptr;
15 by brian
Fix for stat, NETWARE removal
229
  struct stat file_stat; 
1 by brian
clean slate
230
231
  fn_format(az_file, name, db, ARZ, MY_REPLACE_EXT | MY_UNPACK_FILENAME);
232
15 by brian
Fix for stat, NETWARE removal
233
  if (stat(az_file, &file_stat))
1 by brian
clean slate
234
    goto err;
235
236
  if (!(azopen(&frm_stream, az_file, O_RDONLY|O_BINARY, AZ_METHOD_BLOCK)))
237
  {
238
    if (errno == EROFS || errno == EACCES)
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
239
      return(my_errno= errno);
240
    return(HA_ERR_CRASHED_ON_USAGE);
1 by brian
clean slate
241
  }
242
243
  if (frm_stream.frm_length == 0)
244
    goto err;
245
246
  frm_ptr= (char *)my_malloc(sizeof(char) * frm_stream.frm_length, MYF(0));
247
  azread_frm(&frm_stream, frm_ptr);
248
  azclose(&frm_stream);
249
250
  *frmlen= frm_stream.frm_length;
251
  *frmblob= (uchar*) frm_ptr;
252
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
253
  return(0);
1 by brian
clean slate
254
err:
255
  my_errno= 0;
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
256
  return(1);
1 by brian
clean slate
257
}
258
259
/*
260
  This method reads the header of a datafile and returns whether or not it was successful.
261
*/
262
int ha_archive::read_data_header(azio_stream *file_to_read)
263
{
264
  if (azread_init(file_to_read) == -1)
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
265
    return(HA_ERR_CRASHED_ON_USAGE);
1 by brian
clean slate
266
267
  if (file_to_read->version >= 3)
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
268
    return(0);
1 by brian
clean slate
269
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
270
  return(1);
1 by brian
clean slate
271
}
272
273
274
/*
275
  We create the shared memory space that we will use for the open table. 
276
  No matter what we try to get or create a share. This is so that a repair
277
  table operation can occur. 
278
279
  See ha_example.cc for a longer description.
280
*/
281
ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, int *rc)
282
{
283
  uint length;
284
285
  pthread_mutex_lock(&archive_mutex);
286
  length=(uint) strlen(table_name);
287
288
  if (!(share=(ARCHIVE_SHARE*) hash_search(&archive_open_tables,
289
                                           (uchar*) table_name,
290
                                           length)))
291
  {
292
    char *tmp_name;
293
    azio_stream archive_tmp;
294
295
    if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
296
                          &share, sizeof(*share),
297
                          &tmp_name, length+1,
298
                          NullS)) 
299
    {
300
      pthread_mutex_unlock(&archive_mutex);
301
      *rc= HA_ERR_OUT_OF_MEM;
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
302
      return(NULL);
1 by brian
clean slate
303
    }
304
305
    share->use_count= 0;
306
    share->table_name_length= length;
307
    share->table_name= tmp_name;
163 by Brian Aker
Merge Monty's code.
308
    share->crashed= false;
309
    share->archive_write_open= false;
1 by brian
clean slate
310
    fn_format(share->data_file_name, table_name, "",
311
              ARZ, MY_REPLACE_EXT | MY_UNPACK_FILENAME);
411.1.1 by Brian Aker
Work on removing GNU specific calls.
312
    my_stpcpy(share->table_name, table_name);
1 by brian
clean slate
313
    /*
314
      We will use this lock for rows.
315
    */
316
    VOID(pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST));
317
    
318
    /*
319
      We read the meta file, but do not mark it dirty. Since we are not
320
      doing a write we won't mark it dirty (and we won't open it for
321
      anything but reading... open it for write and we will generate null
322
      compression writes).
323
    */
324
    if (!(azopen(&archive_tmp, share->data_file_name, O_RDONLY|O_BINARY,
325
                 AZ_METHOD_BLOCK)))
326
    {
327
      VOID(pthread_mutex_destroy(&share->mutex));
328
      free(share);
329
      pthread_mutex_unlock(&archive_mutex);
330
      *rc= HA_ERR_CRASHED_ON_REPAIR;
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
331
      return(NULL);
1 by brian
clean slate
332
    }
333
    stats.auto_increment_value= archive_tmp.auto_increment + 1;
334
    share->rows_recorded= (ha_rows)archive_tmp.rows;
335
    share->crashed= archive_tmp.dirty;
336
    if (share->version < global_version)
337
    {
338
      share->version_rows= share->rows_recorded;
339
      share->version= global_version;
340
    }
341
    azclose(&archive_tmp);
342
343
    VOID(my_hash_insert(&archive_open_tables, (uchar*) share));
344
    thr_lock_init(&share->lock);
345
  }
346
  share->use_count++;
347
  if (share->crashed)
348
    *rc= HA_ERR_CRASHED_ON_USAGE;
349
  pthread_mutex_unlock(&archive_mutex);
350
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
351
  return(share);
1 by brian
clean slate
352
}
353
354
355
/* 
356
  Free the share.
357
  See ha_example.cc for a description.
358
*/
359
int ha_archive::free_share()
360
{
361
  int rc= 0;
362
363
  pthread_mutex_lock(&archive_mutex);
364
  if (!--share->use_count)
365
  {
366
    hash_delete(&archive_open_tables, (uchar*) share);
367
    thr_lock_delete(&share->lock);
368
    VOID(pthread_mutex_destroy(&share->mutex));
369
    /* 
370
      We need to make sure we don't reset the crashed state.
371
      If we open a crashed file, wee need to close it as crashed unless
372
      it has been repaired.
373
      Since we will close the data down after this, we go on and count
374
      the flush on close;
375
    */
163 by Brian Aker
Merge Monty's code.
376
    if (share->archive_write_open == true)
1 by brian
clean slate
377
    {
378
      if (azclose(&(share->archive_write)))
379
        rc= 1;
380
    }
381
    my_free((uchar*) share, MYF(0));
382
  }
383
  pthread_mutex_unlock(&archive_mutex);
384
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
385
  return(rc);
1 by brian
clean slate
386
}
387
388
int ha_archive::init_archive_writer()
389
{
390
  /* 
391
    It is expensive to open and close the data files and since you can't have
392
    a gzip file that can be both read and written we keep a writer open
393
    that is shared amoung all open tables.
394
  */
395
  if (!(azopen(&(share->archive_write), share->data_file_name, 
396
               O_RDWR|O_BINARY, AZ_METHOD_BLOCK)))
397
  {
163 by Brian Aker
Merge Monty's code.
398
    share->crashed= true;
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
399
    return(1);
1 by brian
clean slate
400
  }
163 by Brian Aker
Merge Monty's code.
401
  share->archive_write_open= true;
1 by brian
clean slate
402
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
403
  return(0);
1 by brian
clean slate
404
}
405
406
407
/* 
408
  No locks are required because it is associated with just one handler instance
409
*/
410
int ha_archive::init_archive_reader()
411
{
412
  /* 
413
    It is expensive to open and close the data files and since you can't have
414
    a gzip file that can be both read and written we keep a writer open
415
    that is shared amoung all open tables.
416
  */
163 by Brian Aker
Merge Monty's code.
417
  if (archive_reader_open == false)
1 by brian
clean slate
418
  {
419
    az_method method;
420
421
    switch (archive_use_aio)
422
    {
163 by Brian Aker
Merge Monty's code.
423
    case false:
1 by brian
clean slate
424
      method= AZ_METHOD_BLOCK;
425
      break;
163 by Brian Aker
Merge Monty's code.
426
    case true:
1 by brian
clean slate
427
      method= AZ_METHOD_AIO;
428
      break;
429
    default:
430
      method= AZ_METHOD_BLOCK;
431
    }
432
    if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY, 
433
                 method)))
434
    {
163 by Brian Aker
Merge Monty's code.
435
      share->crashed= true;
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
436
      return(1);
1 by brian
clean slate
437
    }
163 by Brian Aker
Merge Monty's code.
438
    archive_reader_open= true;
1 by brian
clean slate
439
  }
440
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
441
  return(0);
1 by brian
clean slate
442
}
443
444
445
/*
446
  We just implement one additional file extension.
447
*/
448
static const char *ha_archive_exts[] = {
449
  ARZ,
450
  NullS
451
};
452
453
const char **ha_archive::bas_ext() const
454
{
455
  return ha_archive_exts;
456
}
457
458
459
/* 
460
  When opening a file we:
461
  Create/get our shared structure.
462
  Init out lock.
463
  We open the file we will read from.
464
*/
53.2.32 by Monty Taylor
First large swath at getting handler stuff clean.
465
int ha_archive::open(const char *name,
212.1.3 by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)).
466
                     int mode __attribute__((unused)),
53.2.32 by Monty Taylor
First large swath at getting handler stuff clean.
467
                     uint open_options)
1 by brian
clean slate
468
{
469
  int rc= 0;
470
  share= get_share(name, &rc);
471
472
  if (rc == HA_ERR_CRASHED_ON_USAGE && !(open_options & HA_OPEN_FOR_REPAIR))
473
  {
474
    /* purecov: begin inspected */
475
    free_share();
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
476
    return(rc);
1 by brian
clean slate
477
    /* purecov: end */    
478
  }
479
  else if (rc == HA_ERR_OUT_OF_MEM)
480
  {
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
481
    return(rc);
1 by brian
clean slate
482
  }
483
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
484
  assert(share);
1 by brian
clean slate
485
486
  record_buffer= create_record_buffer(table->s->reclength + 
487
                                      ARCHIVE_ROW_HEADER_SIZE);
488
489
  if (!record_buffer)
490
  {
491
    free_share();
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
492
    return(HA_ERR_OUT_OF_MEM);
1 by brian
clean slate
493
  }
494
495
  thr_lock_data_init(&share->lock, &lock, NULL);
496
497
  if (rc == HA_ERR_CRASHED_ON_USAGE && open_options & HA_OPEN_FOR_REPAIR)
498
  {
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
499
    return(0);
1 by brian
clean slate
500
  }
501
  else
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
502
    return(rc);
1 by brian
clean slate
503
}
504
505
506
/*
507
  Closes the file.
508
509
  SYNOPSIS
510
    close();
511
  
512
  IMPLEMENTATION:
513
514
  We first close this storage engines file handle to the archive and
515
  then remove our reference count to the table (and possibly free it
516
  as well).
517
518
  RETURN
519
    0  ok
520
    1  Error
521
*/
522
523
int ha_archive::close(void)
524
{
525
  int rc= 0;
526
527
  destroy_record_buffer(record_buffer);
528
529
  /* First close stream */
163 by Brian Aker
Merge Monty's code.
530
  if (archive_reader_open == true)
1 by brian
clean slate
531
  {
532
    if (azclose(&archive))
533
      rc= 1;
534
  }
535
  /* then also close share */
536
  rc|= free_share();
537
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
538
  return(rc);
1 by brian
clean slate
539
}
540
541
542
/*
543
  We create our data file here. The format is pretty simple. 
544
  You can read about the format of the data file above.
545
  Unlike other storage engines we do not "pack" our data. Since we 
546
  are about to do a general compression, packing would just be a waste of 
547
  CPU time. If the table has blobs they are written after the row in the order 
548
  of creation.
549
*/
550
327.1.5 by Brian Aker
Refactor around classes. TABLE_LIST has been factored out of table.h
551
int ha_archive::create(const char *name, Table *table_arg,
1 by brian
clean slate
552
                       HA_CREATE_INFO *create_info)
553
{
554
  char name_buff[FN_REFLEN];
555
  char linkname[FN_REFLEN];
556
  int error;
557
  azio_stream create_stream;            /* Archive file we are working with */
558
  File frm_file;                   /* File handler for readers */
15 by brian
Fix for stat, NETWARE removal
559
  struct stat file_stat;
1 by brian
clean slate
560
  uchar *frm_ptr;
561
562
  stats.auto_increment_value= create_info->auto_increment_value;
563
327.1.1 by Brian Aker
First pass in encapsulating table (it is now an object, no longer a structure).
564
  for (uint key= 0; key < table_arg->sizeKeys(); key++)
1 by brian
clean slate
565
  {
566
    KEY *pos= table_arg->key_info+key;
567
    KEY_PART_INFO *key_part=     pos->key_part;
568
    KEY_PART_INFO *key_part_end= key_part + pos->key_parts;
569
570
    for (; key_part != key_part_end; key_part++)
571
    {
572
      Field *field= key_part->field;
573
574
      if (!(field->flags & AUTO_INCREMENT_FLAG))
575
      {
576
        error= -1;
577
        goto error;
578
      }
579
    }
580
  }
581
582
  /* 
583
    We reuse name_buff since it is available.
584
  */
585
  if (create_info->data_file_name && create_info->data_file_name[0] != '#')
586
  {
587
    fn_format(name_buff, create_info->data_file_name, "", ARZ,
588
              MY_REPLACE_EXT | MY_UNPACK_FILENAME);
589
    fn_format(linkname, name, "", ARZ,
590
              MY_REPLACE_EXT | MY_UNPACK_FILENAME);
591
  }
592
  else
593
  {
594
    fn_format(name_buff, name, "", ARZ,
595
              MY_REPLACE_EXT | MY_UNPACK_FILENAME);
596
    linkname[0]= 0;
597
  }
598
599
  /*
600
    There is a chance that the file was "discovered". In this case
601
    just use whatever file is there.
602
  */
15 by brian
Fix for stat, NETWARE removal
603
  if (!stat(name_buff, &file_stat))
1 by brian
clean slate
604
  {
605
    my_errno= 0;
606
    if (!(azopen(&create_stream, name_buff, O_CREAT|O_RDWR|O_BINARY,
607
                 AZ_METHOD_BLOCK)))
608
    {
609
      error= errno;
610
      goto error2;
611
    }
612
613
    if (linkname[0])
614
      my_symlink(name_buff, linkname, MYF(0));
615
    fn_format(name_buff, name, "", ".frm",
616
              MY_REPLACE_EXT | MY_UNPACK_FILENAME);
617
618
    /*
619
      Here is where we open up the frm and pass it to archive to store 
620
    */
621
    if ((frm_file= my_open(name_buff, O_RDONLY, MYF(0))) > 0)
622
    {
15 by brian
Fix for stat, NETWARE removal
623
      if (fstat(frm_file, &file_stat))
1 by brian
clean slate
624
      {
625
        frm_ptr= (uchar *)my_malloc(sizeof(uchar) * file_stat.st_size, MYF(0));
626
        if (frm_ptr)
627
        {
628
          my_read(frm_file, frm_ptr, file_stat.st_size, MYF(0));
629
          azwrite_frm(&create_stream, (char *)frm_ptr, file_stat.st_size);
630
          my_free((uchar*)frm_ptr, MYF(0));
631
        }
632
      }
633
      my_close(frm_file, MYF(0));
634
    }
635
636
    if (create_info->comment.str)
637
      azwrite_comment(&create_stream, create_info->comment.str, 
638
                      (unsigned int)create_info->comment.length);
639
640
    /* 
641
      Yes you need to do this, because the starting value 
642
      for the autoincrement may not be zero.
643
    */
644
    create_stream.auto_increment= stats.auto_increment_value ?
645
                                    stats.auto_increment_value - 1 : 0;
646
    if (azclose(&create_stream))
647
    {
648
      error= errno;
649
      goto error2;
650
    }
651
  }
652
  else
653
    my_errno= 0;
654
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
655
  return(0);
1 by brian
clean slate
656
657
error2:
658
  delete_table(name);
659
error:
660
  /* Return error number, if we got one */
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
661
  return(error ? error : -1);
1 by brian
clean slate
662
}
663
664
/*
665
  This is where the actual row is written out.
666
*/
667
int ha_archive::real_write_row(uchar *buf, azio_stream *writer)
668
{
669
  my_off_t written;
670
  unsigned int r_pack_length;
671
672
  /* We pack the row for writing */
673
  r_pack_length= pack_row(buf);
674
675
  written= azwrite_row(writer, record_buffer->buffer, r_pack_length);
676
  if (written != r_pack_length)
677
  {
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
678
    return(-1);
1 by brian
clean slate
679
  }
680
681
  if (!delayed_insert || !bulk_insert)
163 by Brian Aker
Merge Monty's code.
682
    share->dirty= true;
1 by brian
clean slate
683
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
684
  return(0);
1 by brian
clean slate
685
}
686
687
688
/* 
689
  Calculate max length needed for row. This includes
690
  the bytes required for the length in the header.
691
*/
692
212.1.3 by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)).
693
uint32_t ha_archive::max_row_length(const uchar *buf __attribute__((unused)))
1 by brian
clean slate
694
{
327.1.1 by Brian Aker
First pass in encapsulating table (it is now an object, no longer a structure).
695
  uint32_t length= (uint32_t)(table->getRecordLength() + table->sizeFields()*2);
1 by brian
clean slate
696
  length+= ARCHIVE_ROW_HEADER_SIZE;
697
698
  uint *ptr, *end;
327.1.1 by Brian Aker
First pass in encapsulating table (it is now an object, no longer a structure).
699
  for (ptr= table->getBlobField(), end=ptr + table->sizeBlobFields();
1 by brian
clean slate
700
       ptr != end ;
701
       ptr++)
702
  {
703
      length += 2 + ((Field_blob*)table->field[*ptr])->get_length();
704
  }
705
706
  return length;
707
}
708
709
710
unsigned int ha_archive::pack_row(uchar *record)
711
{
712
  uchar *ptr;
713
714
  if (fix_rec_buff(max_row_length(record)))
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
715
    return(HA_ERR_OUT_OF_MEM); /* purecov: inspected */
1 by brian
clean slate
716
717
  /* Copy null bits */
718
  memcpy(record_buffer->buffer, record, table->s->null_bytes);
719
  ptr= record_buffer->buffer + table->s->null_bytes;
720
721
  for (Field **field=table->field ; *field ; field++)
722
  {
723
    if (!((*field)->is_null()))
724
      ptr= (*field)->pack(ptr, record + (*field)->offset(record));
725
  }
726
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
727
  return((unsigned int) (ptr - record_buffer->buffer));
1 by brian
clean slate
728
}
729
730
731
/* 
732
  Look at ha_archive::open() for an explanation of the row format.
733
  Here we just write out the row.
734
735
  Wondering about start_bulk_insert()? We don't implement it for
736
  archive since it optimizes for lots of writes. The only save
737
  for implementing start_bulk_insert() is that we could skip 
738
  setting dirty to true each time.
739
*/
740
int ha_archive::write_row(uchar *buf)
741
{
742
  int rc;
743
  uchar *read_buf= NULL;
744
  uint64_t temp_auto;
745
  uchar *record=  table->record[0];
746
747
  if (share->crashed)
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
748
    return(HA_ERR_CRASHED_ON_USAGE);
1 by brian
clean slate
749
750
  ha_statistic_increment(&SSV::ha_write_count);
751
  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
752
    table->timestamp_field->set_time();
753
  pthread_mutex_lock(&share->mutex);
754
163 by Brian Aker
Merge Monty's code.
755
  if (share->archive_write_open == false)
1 by brian
clean slate
756
    if (init_archive_writer())
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
757
      return(HA_ERR_CRASHED_ON_USAGE);
1 by brian
clean slate
758
759
760
  if (table->next_number_field && record == table->record[0])
761
  {
762
    KEY *mkey= &table->s->key_info[0]; // We only support one key right now
763
    update_auto_increment();
764
    temp_auto= table->next_number_field->val_int();
765
766
    /*
767
      We don't support decremening auto_increment. They make the performance
768
      just cry.
769
    */
770
    if (temp_auto <= share->archive_write.auto_increment && 
771
        mkey->flags & HA_NOSAME)
772
    {
773
      rc= HA_ERR_FOUND_DUPP_KEY;
774
      goto error;
775
    }
776
#ifdef DEAD_CODE
777
    /*
778
      Bad news, this will cause a search for the unique value which is very 
779
      expensive since we will have to do a table scan which will lock up 
780
      all other writers during this period. This could perhaps be optimized 
781
      in the future.
782
    */
783
    {
784
      /* 
785
        First we create a buffer that we can use for reading rows, and can pass
786
        to get_row().
787
      */
788
      if (!(read_buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME))))
789
      {
790
        rc= HA_ERR_OUT_OF_MEM;
791
        goto error;
792
      }
793
       /* 
794
         All of the buffer must be written out or we won't see all of the
795
         data 
796
       */
797
      azflush(&(share->archive_write), Z_SYNC_FLUSH);
798
      /*
799
        Set the position of the local read thread to the beginning postion.
800
      */
801
      if (read_data_header(&archive))
802
      {
803
        rc= HA_ERR_CRASHED_ON_USAGE;
804
        goto error;
805
      }
806
807
      Field *mfield= table->next_number_field;
808
809
      while (!(get_row(&archive, read_buf)))
810
      {
811
        if (!memcmp(read_buf + mfield->offset(record),
812
                    table->next_number_field->ptr,
813
                    mfield->max_display_length()))
814
        {
815
          rc= HA_ERR_FOUND_DUPP_KEY;
816
          goto error;
817
        }
818
      }
819
    }
820
#endif
821
    else
822
    {
823
      if (temp_auto > share->archive_write.auto_increment)
824
        stats.auto_increment_value=
825
          (share->archive_write.auto_increment= temp_auto) + 1;
826
    }
827
  }
828
829
  /*
830
    Notice that the global auto_increment has been increased.
831
    In case of a failed row write, we will never try to reuse the value.
832
  */
833
  share->rows_recorded++;
834
  rc= real_write_row(buf,  &(share->archive_write));
835
error:
836
  pthread_mutex_unlock(&share->mutex);
837
  if (read_buf)
838
    my_free((uchar*) read_buf, MYF(0));
839
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
840
  return(rc);
1 by brian
clean slate
841
}
842
843
212.1.3 by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)).
844
void ha_archive::get_auto_increment(uint64_t offset __attribute__((unused)),
845
                                    uint64_t increment __attribute__((unused)),
846
                                    uint64_t nb_desired_values __attribute__((unused)),
847
                                    uint64_t *first_value __attribute__((unused)),
848
                                    uint64_t *nb_reserved_values __attribute__((unused)))
1 by brian
clean slate
849
{
163 by Brian Aker
Merge Monty's code.
850
  *nb_reserved_values= UINT64_MAX;
1 by brian
clean slate
851
  *first_value= share->archive_write.auto_increment + 1;
852
}
853
854
/* Initialized at each key walk (called multiple times unlike rnd_init()) */
212.1.3 by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)).
855
int ha_archive::index_init(uint keynr, bool sorted __attribute__((unused)))
1 by brian
clean slate
856
{
857
  active_index= keynr;
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
858
  return(0);
1 by brian
clean slate
859
}
860
861
862
/*
863
  No indexes, so if we get a request for an index search since we tell
864
  the optimizer that we have unique indexes, we scan
865
*/
866
int ha_archive::index_read(uchar *buf, const uchar *key,
867
                             uint key_len, enum ha_rkey_function find_flag)
868
{
869
  int rc;
870
  rc= index_read_idx(buf, active_index, key, key_len, find_flag);
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
871
  return(rc);
1 by brian
clean slate
872
}
873
874
875
int ha_archive::index_read_idx(uchar *buf, uint index, const uchar *key,
53.2.32 by Monty Taylor
First large swath at getting handler stuff clean.
876
                               uint key_len,
212.1.3 by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)).
877
                               enum ha_rkey_function find_flag __attribute__((unused)))
1 by brian
clean slate
878
{
879
  int rc;
880
  bool found= 0;
881
  KEY *mkey= &table->s->key_info[index];
882
  current_k_offset= mkey->key_part->offset;
883
  current_key= key;
884
  current_key_len= key_len;
885
163 by Brian Aker
Merge Monty's code.
886
  rc= rnd_init(true);
1 by brian
clean slate
887
888
  if (rc)
889
    goto error;
890
891
  while (!(get_row(&archive, buf)))
892
  {
893
    if (!memcmp(current_key, buf + current_k_offset, current_key_len))
894
    {
895
      found= 1;
896
      break;
897
    }
898
  }
899
900
  if (found)
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
901
    return(0);
1 by brian
clean slate
902
903
error:
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
904
  return(rc ? rc : HA_ERR_END_OF_FILE);
1 by brian
clean slate
905
}
906
907
908
/*
  Continue the scan started by index_read_idx(): return the next row whose
  key bytes match the remembered search key, or HA_ERR_END_OF_FILE.
*/
int ha_archive::index_next(uchar *buf)
{
  while (!get_row(&archive, buf))
  {
    if (memcmp(current_key, buf + current_k_offset, current_key_len) == 0)
      return 0;
  }

  return HA_ERR_END_OF_FILE;
}
923
924
/*
925
  All calls that need to scan the table start with this method. If we are told
926
  that it is a table scan we rewind the file to the beginning, otherwise
927
  we assume the position will be set.
928
*/
929
930
int ha_archive::rnd_init(bool scan)
931
{
932
  if (share->crashed)
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
933
      return(HA_ERR_CRASHED_ON_USAGE);
1 by brian
clean slate
934
935
  init_archive_reader();
936
937
  /* We rewind the file so that we can read from the beginning if scan */
938
  if (scan)
939
  {
940
    if (read_data_header(&archive))
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
941
      return(HA_ERR_CRASHED_ON_USAGE);
1 by brian
clean slate
942
  }
943
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
944
  return(0);
1 by brian
clean slate
945
}
946
947
948
/*
949
  This is the method that is used to read a row. It assumes that the row is 
950
  positioned where you want it.
951
*/
952
int ha_archive::get_row(azio_stream *file_to_read, uchar *buf)
953
{
954
  int rc;
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
955
1 by brian
clean slate
956
  if (file_to_read->version == ARCHIVE_VERSION)
957
    rc= get_row_version3(file_to_read, buf);
958
  else
959
    rc= -1;
960
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
961
  return(rc);
1 by brian
clean slate
962
}
963
964
/* Reallocate buffer if needed */
965
bool ha_archive::fix_rec_buff(unsigned int length)
966
{
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
967
  assert(record_buffer->buffer);
1 by brian
clean slate
968
969
  if (length > record_buffer->length)
970
  {
971
    uchar *newptr;
972
    if (!(newptr=(uchar*) my_realloc((uchar*) record_buffer->buffer, 
973
                                    length,
974
				    MYF(MY_ALLOW_ZERO_PTR))))
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
975
      return(1);
1 by brian
clean slate
976
    record_buffer->buffer= newptr;
977
    record_buffer->length= length;
978
  }
979
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
980
  assert(length <= record_buffer->length);
1 by brian
clean slate
981
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
982
  return(0);
1 by brian
clean slate
983
}
984
985
/*
  Decode one compressed row into `record` in the server's row format.
  Returns -1 when azread_row() reports an error or no data, 0 on success.
*/
int ha_archive::unpack_row(azio_stream *file_to_read, uchar *record)
{
  int error;
  unsigned int bytes_read= azread_row(file_to_read, &error);
  const uchar *src= (const uchar *) file_to_read->row_ptr;

  if (error || bytes_read == 0)
    return -1;

  /* The null-bit vector is stored verbatim at the front of the row. */
  memcpy(record, src, table->getNullBytes());
  src+= table->getNullBytes();

  /* Unpack each non-null field into its slot in the record buffer. */
  for (Field **field= table->field; *field; field++)
  {
    if (!((*field)->is_null()))
      src= (*field)->unpack(record + (*field)->offset(table->record[0]), src);
  }

  return 0;
}
1011
1012
1013
/* Format-3 row read: a thin wrapper around unpack_row(). */
int ha_archive::get_row_version3(azio_stream *file_to_read, uchar *buf)
{
  return unpack_row(file_to_read, buf);
}
1019
1020
1021
/* 
1022
  Called during ORDER BY. Its position is either from being called sequentially
1023
  or by having had ha_archive::rnd_pos() called before it is called.
1024
*/
1025
1026
int ha_archive::rnd_next(uchar *buf)
1027
{
1028
  int rc;
1029
1030
  if (share->crashed)
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
1031
      return(HA_ERR_CRASHED_ON_USAGE);
1 by brian
clean slate
1032
1033
  if (!scan_rows)
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
1034
    return(HA_ERR_END_OF_FILE);
1 by brian
clean slate
1035
  scan_rows--;
1036
1037
  ha_statistic_increment(&SSV::ha_read_rnd_next_count);
1038
  current_position= aztell(&archive);
1039
  rc= get_row(&archive, buf);
1040
1041
  table->status=rc ? STATUS_NOT_FOUND: 0;
1042
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
1043
  return(rc);
1 by brian
clean slate
1044
}
1045
1046
1047
/*
1048
  Thanks to the table flag HA_REC_NOT_IN_SEQ this will be called after
1049
  each call to ha_archive::rnd_next() if an ordering of the rows is
1050
  needed.
1051
*/
1052
212.1.3 by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)).
1053
void ha_archive::position(const uchar *record __attribute__((unused)))
1 by brian
clean slate
1054
{
1055
  my_store_ptr(ref, ref_length, current_position);
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
1056
  return;
1 by brian
clean slate
1057
}
1058
1059
1060
/*
1061
  This is called after a table scan for each row if the results of the
1062
  scan need to be ordered. It will take *pos and use it to move the
1063
  cursor in the file so that the next row that is called is the
1064
  correctly ordered row.
1065
*/
1066
1067
int ha_archive::rnd_pos(uchar * buf, uchar *pos)
1068
{
1069
  ha_statistic_increment(&SSV::ha_read_rnd_next_count);
1070
  current_position= (my_off_t)my_get_ptr(pos, ref_length);
1071
  if (azseek(&archive, (size_t)current_position, SEEK_SET) == (size_t)(-1L))
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
1072
    return(HA_ERR_CRASHED_ON_USAGE);
1073
  return(get_row(&archive, buf));
1 by brian
clean slate
1074
}
1075
1076
/*
1077
  This method repairs the meta file. It does this by walking the datafile and 
1078
  rewriting the meta file. Currently it does this by calling optimize with
1079
  the extended flag.
1080
*/
1081
int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt)
1082
{
1083
  check_opt->flags= T_EXTEND;
1084
  int rc= optimize(thd, check_opt);
1085
1086
  if (rc)
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
1087
    return(HA_ERR_CRASHED_ON_REPAIR);
1 by brian
clean slate
1088
163 by Brian Aker
Merge Monty's code.
1089
  share->crashed= false;
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
1090
  return(0);
1 by brian
clean slate
1091
}
1092
1093
/*
1094
  The table can become fragmented if data was inserted, read, and then
1095
  inserted again. What we do is open up the file and recompress it completely. 
1096
*/
212.1.3 by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)).
1097
int ha_archive::optimize(THD* thd __attribute__((unused)),
1098
                         HA_CHECK_OPT* check_opt __attribute__((unused)))
1 by brian
clean slate
1099
{
1100
  int rc= 0;
1101
  azio_stream writer;
1102
  char writer_filename[FN_REFLEN];
1103
1104
  init_archive_reader();
1105
1106
  // now we close both our writer and our reader for the rename
1107
  if (share->archive_write_open)
1108
  {
1109
    azclose(&(share->archive_write));
163 by Brian Aker
Merge Monty's code.
1110
    share->archive_write_open= false;
1 by brian
clean slate
1111
  }
1112
1113
  /* Lets create a file to contain the new data */
1114
  fn_format(writer_filename, share->table_name, "", ARN, 
1115
            MY_REPLACE_EXT | MY_UNPACK_FILENAME);
1116
1117
  if (!(azopen(&writer, writer_filename, O_CREAT|O_RDWR|O_BINARY, AZ_METHOD_BLOCK)))
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
1118
    return(HA_ERR_CRASHED_ON_USAGE); 
1 by brian
clean slate
1119
1120
  /* 
1121
    An extended rebuild is a lot more effort. We open up each row and re-record it. 
1122
    Any dead rows are removed (aka rows that may have been partially recorded). 
1123
1124
    As of Archive format 3, this is the only type that is performed, before this
1125
    version it was just done on T_EXTEND
1126
  */
1127
  if (1)
1128
  {
1129
    /*
1130
      Now we will rewind the archive file so that we are positioned at the 
1131
      start of the file.
1132
    */
1133
    azflush(&archive, Z_SYNC_FLUSH);
1134
    rc= read_data_header(&archive);
1135
1136
    /* 
1137
      On success of writing out the new header, we now fetch each row and
1138
      insert it into the new archive file. 
1139
    */
1140
    if (!rc)
1141
    {
53.2.2 by Monty Taylor
Updated everything that needs updating to compile with -std=gnu99 -pedantic
1142
      uint64_t x;
1143
      uint64_t rows_restored;
1 by brian
clean slate
1144
      share->rows_recorded= 0;
1145
      stats.auto_increment_value= 1;
1146
      share->archive_write.auto_increment= 0;
1147
1148
      rows_restored= archive.rows;
1149
1150
      for (x= 0; x < rows_restored ; x++)
1151
      {
1152
        rc= get_row(&archive, table->record[0]);
1153
1154
        if (rc != 0)
1155
          break;
1156
1157
        real_write_row(table->record[0], &writer);
1158
        /*
1159
          Long term it should be possible to optimize this so that
1160
          it is not called on each row.
1161
        */
1162
        if (table->found_next_number_field)
1163
        {
1164
          Field *field= table->found_next_number_field;
1165
          uint64_t auto_value=
1166
            (uint64_t) field->val_int(table->record[0] +
1167
                                       field->offset(table->record[0]));
1168
          if (share->archive_write.auto_increment < auto_value)
1169
            stats.auto_increment_value=
1170
              (share->archive_write.auto_increment= auto_value) + 1;
1171
        }
1172
      }
1173
      share->rows_recorded= (ha_rows)writer.rows;
1174
    }
1175
1176
    if (rc && rc != HA_ERR_END_OF_FILE)
1177
    {
1178
      goto error;
1179
    }
1180
  } 
1181
1182
  azclose(&writer);
163 by Brian Aker
Merge Monty's code.
1183
  share->dirty= false;
1 by brian
clean slate
1184
  
1185
  azclose(&archive);
1186
1187
  // make the file we just wrote be our data file
1188
  rc = my_rename(writer_filename,share->data_file_name,MYF(0));
1189
1190
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
1191
  return(rc);
1 by brian
clean slate
1192
error:
1193
  azclose(&writer);
1194
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
1195
  return(rc); 
1 by brian
clean slate
1196
}
1197
1198
/* 
1199
  Below is an example of how to setup row level locking.
1200
*/
1201
THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
1202
                                       THR_LOCK_DATA **to,
1203
                                       enum thr_lock_type lock_type)
1204
{
1205
  if (lock_type == TL_WRITE_DELAYED)
163 by Brian Aker
Merge Monty's code.
1206
    delayed_insert= true;
1 by brian
clean slate
1207
  else
163 by Brian Aker
Merge Monty's code.
1208
    delayed_insert= false;
1 by brian
clean slate
1209
1210
  if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) 
1211
  {
1212
    /* 
1213
      Here is where we get into the guts of a row level lock.
1214
      If TL_UNLOCK is set 
327.1.5 by Brian Aker
Refactor around classes. TABLE_LIST has been factored out of table.h
1215
      If we are not doing a LOCK Table or DISCARD/IMPORT
1 by brian
clean slate
1216
      TABLESPACE, then allow multiple writers 
1217
    */
1218
1219
    if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
1220
         lock_type <= TL_WRITE) && !thd_in_lock_tables(thd)
1221
        && !thd_tablespace_op(thd))
1222
      lock_type = TL_WRITE_ALLOW_WRITE;
1223
1224
    /* 
1225
      In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
1226
      MySQL would use the lock TL_READ_NO_INSERT on t2, and that
1227
      would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts
1228
      to t2. Convert the lock to a normal read lock to allow
1229
      concurrent inserts to t2. 
1230
    */
1231
1232
    if (lock_type == TL_READ_NO_INSERT && !thd_in_lock_tables(thd)) 
1233
      lock_type = TL_READ;
1234
1235
    lock.type=lock_type;
1236
  }
1237
1238
  *to++= &lock;
1239
1240
  return to;
1241
}
1242
1243
/*
  Fill in CREATE TABLE info: the next auto_increment value (unless the
  user supplied one) and, when the data file is a symlink, its real path
  as the DATA DIRECTORY value.
*/
void ha_archive::update_create_info(HA_CREATE_INFO *create_info)
{
  ha_archive::info(HA_STATUS_AUTO);

  if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
    create_info->auto_increment_value= stats.auto_increment_value;

  if (my_readlink(share->real_path, share->data_file_name, MYF(0)) == 0)
    create_info->data_file_name= share->real_path;
}
1256
1257
1258
/*
1259
  Hints for optimizer, see ha_tina for more information
1260
*/
1261
int ha_archive::info(uint flag)
1262
{
1263
  /* 
1264
    If dirty, we lock, and then reset/flush the data.
1265
    I found that just calling azflush() doesn't always work.
1266
  */
1267
  pthread_mutex_lock(&share->mutex);
163 by Brian Aker
Merge Monty's code.
1268
  if (share->dirty == true)
1 by brian
clean slate
1269
  {
1270
    azflush(&(share->archive_write), Z_SYNC_FLUSH);
1271
    share->rows_recorded= share->archive_write.rows;
163 by Brian Aker
Merge Monty's code.
1272
    share->dirty= false;
1 by brian
clean slate
1273
    if (share->version < global_version)
1274
    {
1275
      share->version_rows= share->rows_recorded;
1276
      share->version= global_version;
1277
    }
1278
1279
  }
1280
1281
  /* 
1282
    This should be an accurate number now, though bulk and delayed inserts can
1283
    cause the number to be inaccurate.
1284
  */
1285
  stats.records= share->rows_recorded;
1286
  pthread_mutex_unlock(&share->mutex);
1287
1288
  scan_rows= stats.records;
1289
  stats.deleted= 0;
1290
1291
  /* Costs quite a bit more to get all information */
1292
  if (flag & HA_STATUS_TIME)
1293
  {
15 by brian
Fix for stat, NETWARE removal
1294
    struct stat file_stat;  // Stat information for the data file
1 by brian
clean slate
1295
15 by brian
Fix for stat, NETWARE removal
1296
    VOID(stat(share->data_file_name, &file_stat));
1 by brian
clean slate
1297
327.1.1 by Brian Aker
First pass in encapsulating table (it is now an object, no longer a structure).
1298
    stats.mean_rec_length= table->getRecordLength()+ buffer.alloced_length();
1 by brian
clean slate
1299
    stats.data_file_length= file_stat.st_size;
1300
    stats.create_time= file_stat.st_ctime;
1301
    stats.update_time= file_stat.st_mtime;
1302
    stats.max_data_file_length= share->rows_recorded * stats.mean_rec_length;
1303
  }
1304
  stats.delete_length= 0;
1305
  stats.index_file_length=0;
1306
1307
  if (flag & HA_STATUS_AUTO)
1308
  {
1309
    init_archive_reader();
1310
    pthread_mutex_lock(&share->mutex);
1311
    azflush(&archive, Z_SYNC_FLUSH);
1312
    pthread_mutex_unlock(&share->mutex);
1313
    stats.auto_increment_value= archive.auto_increment + 1;
1314
  }
1315
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
1316
  return(0);
1 by brian
clean slate
1317
}
1318
1319
1320
/*
1321
  This method tells us that a bulk insert operation is about to occur. We set
1322
  a flag which will keep write_row from saying that its data is dirty. This in
1323
  turn will keep selects from causing a sync to occur.
1324
  Basically, yet another optimizations to keep compression working well.
1325
*/
1326
void ha_archive::start_bulk_insert(ha_rows rows)
1327
{
1328
  if (!rows || rows >= ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT)
163 by Brian Aker
Merge Monty's code.
1329
    bulk_insert= true;
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
1330
  return;
1 by brian
clean slate
1331
}
1332
1333
1334
/* 
1335
  Other side of start_bulk_insert, is end_bulk_insert. Here we turn off the bulk insert
1336
  flag, and set the share dirty so that the next select will call sync for us.
1337
*/
1338
int ha_archive::end_bulk_insert()
1339
{
163 by Brian Aker
Merge Monty's code.
1340
  bulk_insert= false;
1341
  share->dirty= true;
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
1342
  return(0);
1 by brian
clean slate
1343
}
1344
1345
/*
1346
  We cancel a truncate command. The only way to delete an archive table is to drop it.
1347
  This is done for security reasons. In a later version we will enable this by 
1348
  allowing the user to select a different row format.
1349
*/
1350
int ha_archive::delete_all_rows()
1351
{
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
1352
  return(HA_ERR_WRONG_COMMAND);
1 by brian
clean slate
1353
}
1354
1355
/*
1356
  We just return state if asked.
1357
*/
1358
bool ha_archive::is_crashed() const 
1359
{
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
1360
  return(share->crashed); 
1 by brian
clean slate
1361
}
1362
1363
/*
1364
  Simple scan of the tables to make sure everything is ok.
1365
*/
1366
53.2.32 by Monty Taylor
First large swath at getting handler stuff clean.
1367
int ha_archive::check(THD* thd,
212.1.3 by Monty Taylor
Renamed __attribute__((__unused__)) to __attribute__((unused)).
1368
                      HA_CHECK_OPT* check_opt __attribute__((unused)))
1 by brian
clean slate
1369
{
1370
  int rc= 0;
1371
  const char *old_proc_info;
53.2.2 by Monty Taylor
Updated everything that needs updating to compile with -std=gnu99 -pedantic
1372
  uint64_t x;
1 by brian
clean slate
1373
1374
  old_proc_info= thd_proc_info(thd, "Checking table");
1375
  /* Flush any waiting data */
1376
  pthread_mutex_lock(&share->mutex);
1377
  azflush(&(share->archive_write), Z_SYNC_FLUSH);
1378
  pthread_mutex_unlock(&share->mutex);
1379
1380
  /*
1381
    Now we will rewind the archive file so that we are positioned at the 
1382
    start of the file.
1383
  */
1384
  init_archive_reader();
1385
  azflush(&archive, Z_SYNC_FLUSH);
1386
  read_data_header(&archive);
1387
  for (x= 0; x < share->archive_write.rows; x++)
1388
  {
1389
    rc= get_row(&archive, table->record[0]);
1390
1391
    if (rc != 0)
1392
      break;
1393
  }
1394
1395
  thd_proc_info(thd, old_proc_info);
1396
1397
  if ((rc && rc != HA_ERR_END_OF_FILE))  
1398
  {
163 by Brian Aker
Merge Monty's code.
1399
    share->crashed= false;
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
1400
    return(HA_ADMIN_CORRUPT);
1 by brian
clean slate
1401
  }
1402
  else
1403
  {
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
1404
    return(HA_ADMIN_OK);
1 by brian
clean slate
1405
  }
1406
}
1407
1408
/*
1409
  Check and repair the table if needed.
1410
*/
1411
bool ha_archive::check_and_repair(THD *thd) 
1412
{
1413
  HA_CHECK_OPT check_opt;
1414
1415
  check_opt.init();
1416
51.3.8 by Jay Pipes
Removed DBUG from CSV and Blackhole storage engines
1417
  return(repair(thd, &check_opt));
1 by brian
clean slate
1418
}
1419
1420
/*
  Allocate an archive_record_buffer holding `length` bytes of row space.
  Returns NULL if either allocation fails (nothing is leaked).
*/
archive_record_buffer *ha_archive::create_record_buffer(unsigned int length)
{
  archive_record_buffer *buffer=
    (archive_record_buffer *) my_malloc(sizeof(archive_record_buffer),
                                        MYF(MY_WME));
  if (buffer == NULL)
    return NULL; /* purecov: inspected */

  buffer->length= (int) length;

  buffer->buffer= (uchar *) my_malloc(buffer->length, MYF(MY_WME));
  if (buffer->buffer == NULL)
  {
    my_free((char *) buffer, MYF(MY_ALLOW_ZERO_PTR));
    return NULL; /* purecov: inspected */
  }

  return buffer;
}
1440
1441
/* Release a record buffer created by create_record_buffer(). */
void ha_archive::destroy_record_buffer(archive_record_buffer *r)
{
  my_free((char *) r->buffer, MYF(MY_ALLOW_ZERO_PTR));
  my_free((char *) r, MYF(MY_ALLOW_ZERO_PTR));
}
1447
319.1.1 by Grant Limberg
renamed all instances of MYSQL_ to DRIZZLE_
1448
static DRIZZLE_SYSVAR_BOOL(aio, archive_use_aio,
1 by brian
clean slate
1449
  PLUGIN_VAR_NOCMDOPT,
1450
  "Whether or not to use asynchronous IO.",
163 by Brian Aker
Merge Monty's code.
1451
  NULL, NULL, true);
1 by brian
clean slate
1452
1453
static struct st_mysql_sys_var* archive_system_variables[]= {
319.1.1 by Grant Limberg
renamed all instances of MYSQL_ to DRIZZLE_
1454
  DRIZZLE_SYSVAR(aio),
1 by brian
clean slate
1455
  NULL
1456
};
1457
1458
mysql_declare_plugin(archive)
1459
{
319.1.1 by Grant Limberg
renamed all instances of MYSQL_ to DRIZZLE_
1460
  DRIZZLE_STORAGE_ENGINE_PLUGIN,
1 by brian
clean slate
1461
  "ARCHIVE",
177.4.3 by mark
ripped out more plugin ABI and API version checking, and plugin versions are now strings
1462
  "3.5",
1 by brian
clean slate
1463
  "Brian Aker, MySQL AB",
1464
  "Archive storage engine",
1465
  PLUGIN_LICENSE_GPL,
1466
  archive_db_init, /* Plugin Init */
1467
  archive_db_done, /* Plugin Deinit */
1468
  NULL,                       /* status variables                */
1469
  archive_system_variables,   /* system variables                */
1470
  NULL                        /* config options                  */
1471
}
1472
mysql_declare_plugin_end;
1473