/* Copyright (C) 2000-2006 MySQL AB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */

#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation    // gcc: Class implementation
#endif

#define MYSQL_SERVER 1
#include "mysql_priv.h"
#include <mysql/plugin.h>
#include "ha_heap.h"

static handler *heap_create_handler(handlerton *hton,
                                    TABLE_SHARE *table,
                                    MEM_ROOT *mem_root);

int heap_panic(handlerton *hton, ha_panic_function flag)
{
  return hp_panic(flag);
}


int heap_init(void *p)
{
  handlerton *heap_hton;

  heap_hton= (handlerton *)p;
  heap_hton->state=   SHOW_OPTION_YES;
  heap_hton->db_type= DB_TYPE_HEAP;
  heap_hton->create=  heap_create_handler;
  heap_hton->panic=   heap_panic;
  heap_hton->flags=   HTON_CAN_RECREATE;

  return 0;
}
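
/*
  Factory callback registered in the handlerton above: the server calls it
  (through heap_hton->create) to instantiate a ha_heap handler for a table
  share, allocating the object on the supplied MEM_ROOT.
*/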

static handler *heap_create_handler(handlerton *hton,
                                    TABLE_SHARE *table,
                                    MEM_ROOT *mem_root)
{
  return new (mem_root) ha_heap(hton, table);
}


/*****************************************************************************
** HEAP tables
*****************************************************************************/

ha_heap::ha_heap(handlerton *hton, TABLE_SHARE *table_arg)
  :handler(hton, table_arg), file(0), records_changed(0), key_stat_version(0),
   internal_table(0)
{}

static const char *ha_heap_exts[] = {
  NullS
};

const char **ha_heap::bas_ext() const
{
  return ha_heap_exts;
}

/*
  Hash index statistics are updated (copied from HP_KEYDEF::hash_buckets to
  rec_per_key) after 1/HEAP_STATS_UPDATE_THRESHOLD fraction of table records
  have been inserted/updated/deleted. delete_all_rows() and table flush cause
  an immediate update.

  NOTE
    Hash index statistics must be updated when the number of table records
    changes from 0 to a non-zero value and vice versa. Otherwise
    records_in_range may erroneously return 0 and 'range' may miss records.
*/
#define HEAP_STATS_UPDATE_THRESHOLD 10
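
/*
  Example of the refresh rule used by write_row()/update_row()/delete_row()
  below: with the threshold at 10 and roughly 1000 rows in the table, the
  cached hash-key statistics are invalidated once more than about 100 rows
  (one tenth of the table) have changed since the last update_key_stats().
*/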

int ha_heap::open(const char *name, int mode, uint test_if_locked)
{
  if ((test_if_locked & HA_OPEN_INTERNAL_TABLE) ||
      (!(file= heap_open(name, mode)) && my_errno == ENOENT))
  {
    HA_CREATE_INFO create_info;
    internal_table= test(test_if_locked & HA_OPEN_INTERNAL_TABLE);
    bzero(&create_info, sizeof(create_info));
    file= 0;
    if (!create(name, table, &create_info))
    {
      file= internal_table ?
        heap_open_from_share(internal_share, mode) :
        heap_open_from_share_and_register(internal_share, mode);
      if (!file)
      {
        /* Couldn't open table; Remove the newly created table */
        pthread_mutex_lock(&THR_LOCK_heap);
        hp_free(internal_share);
        pthread_mutex_unlock(&THR_LOCK_heap);
      }
    }
  }
  ref_length= sizeof(HEAP_PTR);
  if (file)
  {
    /* Initialize variables for the opened table */
    set_keys_for_scanning();
    /*
      We cannot run update_key_stats() here because we do not have a
      lock on the table. The 'records' count might just be changed
      temporarily at this moment and we might get wrong statistics (Bug
      #10178). Instead we request for update. This will be done in
      ha_heap::info(), which is always called before key statistics are
      used.
    */
    key_stat_version= file->s->key_stat_version - 1;
  }
  return (file ? 0 : 1);
}
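
/*
  Internal (implicitly created) tables were opened with heap_open_from_share()
  and are not registered in the global list of open HEAP shares, so they are
  torn down with hp_close(); regular tables were registered and go through
  heap_close().
*/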

int ha_heap::close(void)
{
  return internal_table ? hp_close(file) : heap_close(file);
}


/*
  Create a copy of this table

  DESCRIPTION
    Do same as default implementation but use file->s->name instead of
    table->s->path. This is needed by Windows where the clone() call sees
    '/'-delimited path in table->s->path, while ha_heap::open() was called
    with '\'-delimited path.
*/

handler *ha_heap::clone(MEM_ROOT *mem_root)
{
  handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type());
  if (new_handler && !new_handler->ha_open(table, file->s->name, table->db_stat,
                                           HA_OPEN_IGNORE_IF_LOCKED))
    return new_handler;
  return NULL;  /* purecov: inspected */
}


/*
  Compute which keys to use for scanning

  SYNOPSIS
    set_keys_for_scanning()
    no parameter

  DESCRIPTION
    Set the bitmap btree_keys, which is used when the upper layers ask
    which keys to use for scanning. For each btree index the
    corresponding bit is set.

  RETURN
    void
*/

void ha_heap::set_keys_for_scanning(void)
{
  btree_keys.clear_all();
  for (uint i= 0 ; i < table->s->keys ; i++)
  {
    if (table->key_info[i].algorithm == HA_KEY_ALG_BTREE)
      btree_keys.set_bit(i);
  }
}


void ha_heap::update_key_stats()
{
  for (uint i= 0; i < table->s->keys; i++)
  {
    KEY *key= table->key_info + i;
    if (!key->rec_per_key)
      continue;
    if (key->algorithm != HA_KEY_ALG_BTREE)
    {
      if (key->flags & HA_NOSAME)
        key->rec_per_key[key->key_parts-1]= 1;
      else
      {
        ha_rows hash_buckets= file->s->keydef[i].hash_buckets;
        uint no_records= hash_buckets ? (uint) (file->s->records/hash_buckets) : 2;
        if (no_records < 2)
          no_records= 2;
        key->rec_per_key[key->key_parts-1]= no_records;
      }
    }
  }
  records_changed= 0;
  /* At the end of update_key_stats() we can proudly claim they are OK. */
  key_stat_version= file->s->key_stat_version;
}
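
/*
  The three DML entry points below (write_row, update_row, delete_row) share
  one bookkeeping rule: every successful change bumps records_changed, and as
  soon as records_changed * HEAP_STATS_UPDATE_THRESHOLD exceeds the current
  row count the shared key_stat_version is incremented, which makes the next
  info() call recompute rec_per_key via update_key_stats().
*/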

int ha_heap::write_row(uchar * buf)
{
  int res;
  ha_statistic_increment(&SSV::ha_write_count);
  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
    table->timestamp_field->set_time();
  if (table->next_number_field && buf == table->record[0])
  {
    if ((res= update_auto_increment()))
      return res;
  }
  res= heap_write(file, buf);
  if (!res && (++records_changed*HEAP_STATS_UPDATE_THRESHOLD >
               file->s->records))
  {
    /*
      We can perform this safely since only one writer at the time is
      allowed on the table.
    */
    file->s->key_stat_version++;
  }
  return res;
}

int ha_heap::update_row(const uchar * old_data, uchar * new_data)
{
  int res;
  ha_statistic_increment(&SSV::ha_update_count);
  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
    table->timestamp_field->set_time();
  res= heap_update(file, old_data, new_data);
  if (!res && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD >
              file->s->records)
  {
    /*
      We can perform this safely since only one writer at the time is
      allowed on the table.
    */
    file->s->key_stat_version++;
  }
  return res;
}

int ha_heap::delete_row(const uchar * buf)
{
  int res;
  ha_statistic_increment(&SSV::ha_delete_count);
  res= heap_delete(file, buf);
  if (!res && table->s->tmp_table == NO_TMP_TABLE &&
      ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records)
  {
    /*
      We can perform this safely since only one writer at the time is
      allowed on the table.
    */
    file->s->key_stat_version++;
  }
  return res;
}

int ha_heap::index_read_map(uchar *buf, const uchar *key,
                            key_part_map keypart_map,
                            enum ha_rkey_function find_flag)
{
  DBUG_ASSERT(inited==INDEX);
  ha_statistic_increment(&SSV::ha_read_key_count);
  int error= heap_rkey(file, buf, active_index, key, keypart_map, find_flag);
  table->status= error ? STATUS_NOT_FOUND : 0;
  return error;
}

int ha_heap::index_read_last_map(uchar *buf, const uchar *key,
                                 key_part_map keypart_map)
{
  DBUG_ASSERT(inited==INDEX);
  ha_statistic_increment(&SSV::ha_read_key_count);
  int error= heap_rkey(file, buf, active_index, key, keypart_map,
                       HA_READ_PREFIX_LAST);
  table->status= error ? STATUS_NOT_FOUND : 0;
  return error;
}

int ha_heap::index_read_idx_map(uchar *buf, uint index, const uchar *key,
                                key_part_map keypart_map,
                                enum ha_rkey_function find_flag)
{
  ha_statistic_increment(&SSV::ha_read_key_count);
  int error= heap_rkey(file, buf, index, key, keypart_map, find_flag);
  table->status= error ? STATUS_NOT_FOUND : 0;
  return error;
}

int ha_heap::index_next(uchar * buf)
{
  DBUG_ASSERT(inited==INDEX);
  ha_statistic_increment(&SSV::ha_read_next_count);
  int error= heap_rnext(file, buf);
  table->status= error ? STATUS_NOT_FOUND : 0;
  return error;
}

int ha_heap::index_prev(uchar * buf)
{
  DBUG_ASSERT(inited==INDEX);
  ha_statistic_increment(&SSV::ha_read_prev_count);
  int error= heap_rprev(file, buf);
  table->status= error ? STATUS_NOT_FOUND : 0;
  return error;
}

int ha_heap::index_first(uchar * buf)
{
  DBUG_ASSERT(inited==INDEX);
  ha_statistic_increment(&SSV::ha_read_first_count);
  int error= heap_rfirst(file, buf, active_index);
  table->status= error ? STATUS_NOT_FOUND : 0;
  return error;
}

int ha_heap::index_last(uchar * buf)
{
  DBUG_ASSERT(inited==INDEX);
  ha_statistic_increment(&SSV::ha_read_last_count);
  int error= heap_rlast(file, buf, active_index);
  table->status= error ? STATUS_NOT_FOUND : 0;
  return error;
}
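
/*
  Sequential scan interface: rnd_init() starts a heap scan, rnd_next() returns
  rows in storage order, position() saves the current row's HEAP_PTR into
  'ref', and rnd_pos() re-reads a row from such a saved position.
*/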

int ha_heap::rnd_init(bool scan)
{
  return scan ? heap_scan_init(file) : 0;
}

int ha_heap::rnd_next(uchar *buf)
{
  ha_statistic_increment(&SSV::ha_read_rnd_next_count);
  int error= heap_scan(file, buf);
  table->status= error ? STATUS_NOT_FOUND : 0;
  return error;
}

int ha_heap::rnd_pos(uchar * buf, uchar *pos)
{
  int error;
  HEAP_PTR heap_position;
  ha_statistic_increment(&SSV::ha_read_rnd_count);
  memcpy_fixed((char*) &heap_position, pos, sizeof(HEAP_PTR));
  error= heap_rrnd(file, buf, heap_position);
  table->status= error ? STATUS_NOT_FOUND : 0;
  return error;
}

void ha_heap::position(const uchar *record)
{
  *(HEAP_PTR*) ref= heap_position(file);  // Ref is aligned
}

int ha_heap::info(uint flag)
{
  HEAPINFO hp_info;
  (void) heap_info(file, &hp_info, flag);

  errkey=                     hp_info.errkey;
  stats.records=              hp_info.records;
  stats.deleted=              hp_info.deleted;
  stats.mean_rec_length=      hp_info.reclength;
  stats.data_file_length=     hp_info.data_length;
  stats.index_file_length=    hp_info.index_length;
  stats.max_data_file_length= hp_info.max_records * hp_info.reclength;
  stats.delete_length=        hp_info.deleted * hp_info.reclength;
  if (flag & HA_STATUS_AUTO)
    stats.auto_increment_value= hp_info.auto_increment;
  /*
    If info() is called for the first time after open(), we will still
    have to update the key statistics. Hoping that a table lock is now
    in place.
  */
  if (key_stat_version != file->s->key_stat_version)
    update_key_stats();
  return 0;
}

int ha_heap::extra(enum ha_extra_function operation)
{
  return heap_extra(file, operation);
}


int ha_heap::reset()
{
  return heap_reset(file);
}

int ha_heap::delete_all_rows()
{
  heap_clear(file);
  if (table->s->tmp_table == NO_TMP_TABLE)
  {
    /*
      We can perform this safely since only one writer at the time is
      allowed on the table.
    */
    file->s->key_stat_version++;
  }
  return 0;
}

int ha_heap::external_lock(THD *thd, int lock_type)
{
  return 0;  // No external locking
}
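
/*
  The next two methods implement index switching for this engine. Only the
  HA_KEY_SWITCH_ALL mode is supported, and (as the comments below spell out)
  re-enabling indexes only works while the table is empty, because the engine
  cannot rebuild index contents from existing rows.
*/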

/*
  Disable indexes.

  SYNOPSIS
    disable_indexes()
    mode        mode of operation:
                HA_KEY_SWITCH_NONUNIQ      disable all non-unique keys
                HA_KEY_SWITCH_ALL          disable all keys
                HA_KEY_SWITCH_NONUNIQ_SAVE dis. non-uni. and make persistent
                HA_KEY_SWITCH_ALL_SAVE     dis. all keys and make persistent

  DESCRIPTION
    Disable indexes and clear keys to use for scanning.

  IMPLEMENTATION
    HA_KEY_SWITCH_NONUNIQ       is not implemented.
    HA_KEY_SWITCH_NONUNIQ_SAVE  is not implemented with HEAP.
    HA_KEY_SWITCH_ALL_SAVE      is not implemented with HEAP.

  RETURN
    0                     ok
    HA_ERR_WRONG_COMMAND  mode not implemented.
*/

int ha_heap::disable_indexes(uint mode)
{
  int error;

  if (mode == HA_KEY_SWITCH_ALL)
  {
    if (!(error= heap_disable_indexes(file)))
      set_keys_for_scanning();
  }
  else
  {
    /* mode not implemented */
    error= HA_ERR_WRONG_COMMAND;
  }
  return error;
}

/*
  Enable indexes.

  SYNOPSIS
    enable_indexes()
    mode        mode of operation:
                HA_KEY_SWITCH_NONUNIQ      enable all non-unique keys
                HA_KEY_SWITCH_ALL          enable all keys
                HA_KEY_SWITCH_NONUNIQ_SAVE en. non-uni. and make persistent
                HA_KEY_SWITCH_ALL_SAVE     en. all keys and make persistent

  DESCRIPTION
    Enable indexes and set keys to use for scanning.
    The indexes might have been disabled by disable_index() before.
    The function works only if both data and indexes are empty,
    since the heap storage engine cannot repair the indexes.
    To be sure, call handler::delete_all_rows() before.

  IMPLEMENTATION
    HA_KEY_SWITCH_NONUNIQ       is not implemented.
    HA_KEY_SWITCH_NONUNIQ_SAVE  is not implemented with HEAP.
    HA_KEY_SWITCH_ALL_SAVE      is not implemented with HEAP.

  RETURN
    0                     ok
    HA_ERR_CRASHED        data or index is non-empty. Delete all rows and retry.
    HA_ERR_WRONG_COMMAND  mode not implemented.
*/

int ha_heap::enable_indexes(uint mode)
{
  int error;

  if (mode == HA_KEY_SWITCH_ALL)
  {
    if (!(error= heap_enable_indexes(file)))
      set_keys_for_scanning();
  }
  else
  {
    /* mode not implemented */
    error= HA_ERR_WRONG_COMMAND;
  }
  return error;
}

/*
  Test if indexes are disabled.

  SYNOPSIS
    indexes_are_disabled()
    no parameters

  RETURN
    0   indexes are not disabled
    1   all indexes are disabled
    [2  non-unique indexes are disabled - NOT YET IMPLEMENTED]
*/

int ha_heap::indexes_are_disabled(void)
{
  return heap_indexes_are_disabled(file);
}
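
/*
  Locking: the server serializes access with a table-level THR_LOCK, so
  store_lock() simply hands back the handler's THR_LOCK_DATA; external_lock()
  above is a no-op for the same reason.
*/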

THR_LOCK_DATA **ha_heap::store_lock(THD *thd,
                                    THR_LOCK_DATA **to,
                                    enum thr_lock_type lock_type)
{
  if (lock_type != TL_IGNORE && file->lock.type == TL_UNLOCK)
    file->lock.type= lock_type;
  *to++= &file->lock;
  return to;
}

/*
  We have to ignore ENOENT entries as the HEAP table is created on open and
  not when doing a CREATE on the table.
*/

int ha_heap::delete_table(const char *name)
{
  int error= heap_delete_table(name);
  return error == ENOENT ? 0 : error;
}

void ha_heap::drop_table(const char *name)
{
  file->s->delete_on_close= 1;
  close();
}


int ha_heap::rename_table(const char * from, const char * to)
{
  return heap_rename(from, to);
}

ha_rows ha_heap::records_in_range(uint inx, key_range *min_key,
                                  key_range *max_key)
{
  KEY *key= table->key_info + inx;
  if (key->algorithm == HA_KEY_ALG_BTREE)
    return hp_rb_records_in_range(file, inx, min_key, max_key);

  if (!min_key || !max_key ||
      min_key->length != max_key->length ||
      min_key->length != key->key_length ||
      min_key->flag != HA_READ_KEY_EXACT ||
      max_key->flag != HA_READ_AFTER_KEY)
    return HA_POS_ERROR;  // Can only use exact keys

  if (stats.records <= 1)
    return stats.records;

  /* Assert that info() did run. We need current statistics here. */
  DBUG_ASSERT(key_stat_version == file->s->key_stat_version);
  return key->rec_per_key[key->key_parts-1];
}
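
/*
  create() translates the server's KEY/KEY_PART_INFO metadata into heap's
  HP_KEYDEF/HA_KEYSEG arrays, estimates the per-row memory footprint
  (mem_per_row) for hash vs. btree keys, and derives max_rows from
  max_heap_table_size / mem_per_row before calling heap_create().
*/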

int ha_heap::create(const char *name, TABLE *table_arg,
                    HA_CREATE_INFO *create_info)
{
  uint key, parts, mem_per_row= 0, keys= table_arg->s->keys;
  uint auto_key= 0, auto_key_type= 0;
  ha_rows max_rows;
  HP_KEYDEF *keydef;
  HA_KEYSEG *seg;
  int error;
  TABLE_SHARE *share= table_arg->s;
  bool found_real_auto_increment= 0;

  for (key= parts= 0; key < keys; key++)
    parts+= table_arg->key_info[key].key_parts;

  if (!(keydef= (HP_KEYDEF*) my_malloc(keys * sizeof(HP_KEYDEF) +
                                       parts * sizeof(HA_KEYSEG),
                                       MYF(MY_WME))))
    return my_errno;
  seg= my_reinterpret_cast(HA_KEYSEG*) (keydef + keys);
  for (key= 0; key < keys; key++)
  {
    KEY *pos= table_arg->key_info+key;
    KEY_PART_INFO *key_part=     pos->key_part;
    KEY_PART_INFO *key_part_end= key_part + pos->key_parts;

    keydef[key].keysegs=   (uint) pos->key_parts;
    keydef[key].flag=      (pos->flags & (HA_NOSAME | HA_NULL_ARE_EQUAL));
    keydef[key].seg=       seg;

    switch (pos->algorithm) {
    case HA_KEY_ALG_UNDEF:
    case HA_KEY_ALG_HASH:
      keydef[key].algorithm= HA_KEY_ALG_HASH;
      mem_per_row+= sizeof(char*) * 2; // = sizeof(HASH_INFO)
      break;
    case HA_KEY_ALG_BTREE:
      keydef[key].algorithm= HA_KEY_ALG_BTREE;
      mem_per_row+= sizeof(TREE_ELEMENT) + pos->key_length + sizeof(char*);
      break;
    default:
      DBUG_ASSERT(0); // cannot happen
    }

    for (; key_part != key_part_end; key_part++, seg++)
    {
      Field *field= key_part->field;

      if (pos->algorithm == HA_KEY_ALG_BTREE)
        seg->type= field->key_type();
      else
      {
        if ((seg->type = field->key_type()) != (int) HA_KEYTYPE_TEXT &&
            seg->type != HA_KEYTYPE_VARTEXT1 &&
            seg->type != HA_KEYTYPE_VARTEXT2 &&
            seg->type != HA_KEYTYPE_VARBINARY1 &&
            seg->type != HA_KEYTYPE_VARBINARY2)
          seg->type= HA_KEYTYPE_BINARY;
      }
      seg->start=  (uint) key_part->offset;
      seg->length= (uint) key_part->length;
      seg->flag=   key_part->key_part_flag;

      if (field->flags & (ENUM_FLAG | SET_FLAG))
        seg->charset= &my_charset_bin;
      else
        seg->charset= field->charset();
      if (field->null_ptr)
      {
        seg->null_bit= field->null_bit;
        seg->null_pos= (uint) (field->null_ptr - (uchar*) table_arg->record[0]);
      }
      else
      {
        seg->null_bit= 0;
        seg->null_pos= 0;
      }
      if (field->flags & AUTO_INCREMENT_FLAG &&
          table_arg->found_next_number_field &&
          key == share->next_number_index)
      {
        /*
          Store key number and type for found auto_increment key
          We have to store type as seg->type can differ from it
        */
        auto_key= key + 1;
        auto_key_type= field->key_type();
      }
    }
  }
  mem_per_row+= MY_ALIGN(share->reclength + 1, sizeof(char*));
  max_rows= (ha_rows) (table_arg->in_use->variables.max_heap_table_size /
                       (uint64_t) mem_per_row);
  if (table_arg->found_next_number_field)
  {
    keydef[share->next_number_index].flag|= HA_AUTO_KEY;
    found_real_auto_increment= share->next_number_key_offset == 0;
  }
  HP_CREATE_INFO hp_create_info;
  hp_create_info.auto_key= auto_key;
  hp_create_info.auto_key_type= auto_key_type;
  hp_create_info.auto_increment= (create_info->auto_increment_value ?
                                  create_info->auto_increment_value - 1 : 0);
  hp_create_info.max_table_size= current_thd->variables.max_heap_table_size;
  hp_create_info.with_auto_increment= found_real_auto_increment;
  hp_create_info.internal_table= internal_table;
  max_rows= (ha_rows) (hp_create_info.max_table_size / mem_per_row);
  error= heap_create(name,
                     keys, keydef, share->reclength,
                     (ulong) ((share->max_rows < max_rows &&
                               share->max_rows) ?
                              share->max_rows : max_rows),
                     (ulong) share->min_rows, &hp_create_info, &internal_share);
  my_free((uchar*) keydef, MYF(0));
  DBUG_ASSERT(file == 0);
  return (error);
}

void ha_heap::update_create_info(HA_CREATE_INFO *create_info)
{
  table->file->info(HA_STATUS_AUTO);
  if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
    create_info->auto_increment_value= stats.auto_increment_value;
}

void ha_heap::get_auto_increment(uint64_t offset, uint64_t increment,
                                 uint64_t nb_desired_values,
                                 uint64_t *first_value,
                                 uint64_t *nb_reserved_values)
{
  ha_heap::info(HA_STATUS_AUTO);
  *first_value= stats.auto_increment_value;
  /* such table has only table-level locking so reserves up to +inf */
  *nb_reserved_values= ULONGLONG_MAX;
}
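
/*
  Called during ALTER TABLE to decide whether the existing table data is
  compatible with the new definition: a changed auto_increment value, an
  IS_EQUAL_NO result, or a pack-length change forces a full copy
  (COMPATIBLE_DATA_NO).
*/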

bool ha_heap::check_if_incompatible_data(HA_CREATE_INFO *info,
                                         uint table_changes)
{
  /* Check that auto_increment value was not changed */
  if ((info->used_fields & HA_CREATE_USED_AUTO &&
       info->auto_increment_value != 0) ||
      table_changes == IS_EQUAL_NO ||
      table_changes & IS_EQUAL_PACK_LENGTH) // Not implemented yet
    return COMPATIBLE_DATA_NO;
  return COMPATIBLE_DATA_YES;
}

struct st_mysql_storage_engine heap_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };

mysql_declare_plugin(heap)
{
  MYSQL_STORAGE_ENGINE_PLUGIN,
  &heap_storage_engine,
  "MEMORY",
  "MySQL AB",
  "Hash based, stored in memory, useful for temporary tables",
  PLUGIN_LICENSE_GPL,
  heap_init,                  /* plugin init                     */
  NULL,                       /* plugin deinit                   */
  0x0100,                     /* version 1.0                     */
  NULL,                       /* status variables                */
  NULL,                       /* system variables                */
  NULL                        /* config options                  */
}
mysql_declare_plugin_end;