1
by brian
clean slate |
1 |
/* Copyright (C) 2000-2006 MySQL AB
|
2 |
||
3 |
This program is free software; you can redistribute it and/or modify
|
|
4 |
it under the terms of the GNU General Public License as published by
|
|
5 |
the Free Software Foundation; version 2 of the License.
|
|
6 |
||
7 |
This program is distributed in the hope that it will be useful,
|
|
8 |
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
9 |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
10 |
GNU General Public License for more details.
|
|
11 |
||
12 |
You should have received a copy of the GNU General Public License
|
|
13 |
along with this program; if not, write to the Free Software
|
|
14 |
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
|
15 |
||
16 |
||
17 |
#ifdef USE_PRAGMA_IMPLEMENTATION
|
|
18 |
#pragma implementation // gcc: Class implementation |
|
19 |
#endif
|
|
20 |
||
21 |
#define MYSQL_SERVER 1
|
|
22 |
#include "mysql_priv.h" |
|
23 |
#include <mysql/plugin.h> |
|
24 |
#include "ha_heap.h" |
|
25 |
#include "heapdef.h" |
|
26 |
||
27 |
static handler *heap_create_handler(handlerton *hton, |
|
28 |
TABLE_SHARE *table, |
|
29 |
MEM_ROOT *mem_root); |
|
30 |
||
31 |
/*
  handlerton panic/flush hook for the HEAP engine.
  Forwards the request straight to the heap library; hton is unused.
*/
int heap_panic(handlerton *hton, ha_panic_function flag)
{
  int result= hp_panic(flag);
  return result;
}
|
|
35 |
||
36 |
||
37 |
int heap_init(void *p) |
|
38 |
{
|
|
39 |
handlerton *heap_hton; |
|
40 |
||
41 |
heap_hton= (handlerton *)p; |
|
42 |
heap_hton->state= SHOW_OPTION_YES; |
|
43 |
heap_hton->db_type= DB_TYPE_HEAP; |
|
44 |
heap_hton->create= heap_create_handler; |
|
45 |
heap_hton->panic= heap_panic; |
|
46 |
heap_hton->flags= HTON_CAN_RECREATE; |
|
47 |
||
48 |
return 0; |
|
49 |
}
|
|
50 |
||
51 |
/*
  Handler factory registered in heap_init(): allocates a new ha_heap
  instance on the caller-supplied MEM_ROOT (placement new, so no
  matching delete is required).
*/
static handler *heap_create_handler(handlerton *hton,
                                    TABLE_SHARE *table,
                                    MEM_ROOT *mem_root)
{
  handler *h= new (mem_root) ha_heap(hton, table);
  return h;
}
|
|
57 |
||
58 |
||
59 |
/*****************************************************************************
|
|
60 |
** HEAP tables
|
|
61 |
*****************************************************************************/
|
|
62 |
||
63 |
/*
  Constructor: start with no open HP_INFO handle, no pending statistic
  changes, and key_stat_version 0 (forced behind the share's version so
  the first info() call refreshes key statistics).
*/
ha_heap::ha_heap(handlerton *hton, TABLE_SHARE *table_arg)
  :handler(hton, table_arg), file(0), records_changed(0), key_stat_version(0),
  internal_table(0)
{}
|
|
67 |
||
68 |
||
69 |
/*
  HEAP tables live entirely in memory, so there are no data/index files
  on disk and therefore no file extensions: the list is just NullS.
*/
static const char *ha_heap_exts[] = {
  NullS
};

const char **ha_heap::bas_ext() const
{
  return ha_heap_exts;
}
|
|
77 |
||
78 |
/*
|
|
79 |
Hash index statistics is updated (copied from HP_KEYDEF::hash_buckets to
|
|
80 |
rec_per_key) after 1/HEAP_STATS_UPDATE_THRESHOLD fraction of table records
|
|
81 |
have been inserted/updated/deleted. delete_all_rows() and table flush cause
|
|
82 |
immediate update.
|
|
83 |
||
84 |
NOTE
|
|
85 |
hash index statistics must be updated when number of table records changes
|
|
86 |
from 0 to non-zero value and vice versa. Otherwise records_in_range may
|
|
87 |
erroneously return 0 and 'range' may miss records.
|
|
88 |
*/
|
|
89 |
#define HEAP_STATS_UPDATE_THRESHOLD 10
|
|
90 |
||
91 |
/*
  Open a HEAP table.

  Since HEAP tables have no on-disk representation, ENOENT from
  heap_open() means the table has not been materialized yet: in that
  case (and always for internal tables) we create it here and open it
  from the freshly created share. implicit_emptied is set so the upper
  layer knows the table came up empty (relevant for replication).

  RETURN
    0  table opened
    1  open failed
*/
int ha_heap::open(const char *name, int mode, uint test_if_locked)
{
  if ((test_if_locked & HA_OPEN_INTERNAL_TABLE) || (!(file= heap_open(name, mode)) && my_errno == ENOENT))
  {
    HA_CREATE_INFO create_info;
    internal_table= test(test_if_locked & HA_OPEN_INTERNAL_TABLE);
    bzero(&create_info, sizeof(create_info));
    file= 0;
    if (!create(name, table, &create_info))
    {
      /* Internal tables are not registered in the global open list. */
      file= internal_table ?
        heap_open_from_share(internal_share, mode) :
        heap_open_from_share_and_register(internal_share, mode);
      if (!file)
      {
        /* Couldn't open table; Remove the newly created table */
        pthread_mutex_lock(&THR_LOCK_heap);
        hp_free(internal_share);
        pthread_mutex_unlock(&THR_LOCK_heap);
      }
      implicit_emptied= 1;
    }
  }
  ref_length= sizeof(HEAP_PTR);
  if (file)
  {
    /* Initialize variables for the opened table */
    set_keys_for_scanning();
    /*
      We cannot run update_key_stats() here because we do not have a
      lock on the table. The 'records' count might just be changed
      temporarily at this moment and we might get wrong statistics (Bug
      #10178). Instead we request for update. This will be done in
      ha_heap::info(), which is always called before key statistics are
      used.
    */
    key_stat_version= file->s->key_stat_version-1;
  }
  return (file ? 0 : 1);
}
|
|
131 |
||
132 |
int ha_heap::close(void) |
|
133 |
{
|
|
134 |
return internal_table ? hp_close(file) : heap_close(file); |
|
135 |
}
|
|
136 |
||
137 |
||
138 |
/*
|
|
139 |
Create a copy of this table
|
|
140 |
||
141 |
DESCRIPTION
|
|
142 |
Do same as default implementation but use file->s->name instead of
|
|
143 |
table->s->path. This is needed by Windows where the clone() call sees
|
|
144 |
'/'-delimited path in table->s->path, while ha_heap::open() was called
|
|
145 |
with '\'-delimited path.
|
|
146 |
*/
|
|
147 |
||
148 |
/*
  Clone this handler (see comment above: uses file->s->name rather than
  table->s->path so the path delimiter matches what open() was given on
  Windows).

  RETURN
    new handler on success, NULL if allocation or ha_open() failed
*/
handler *ha_heap::clone(MEM_ROOT *mem_root)
{
  handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type());
  if (new_handler && !new_handler->ha_open(table, file->s->name, table->db_stat,
                                           HA_OPEN_IGNORE_IF_LOCKED))
    return new_handler;
  return NULL; /* purecov: inspected */
}
|
|
156 |
||
157 |
||
158 |
/*
|
|
159 |
Compute which keys to use for scanning
|
|
160 |
||
161 |
SYNOPSIS
|
|
162 |
set_keys_for_scanning()
|
|
163 |
no parameter
|
|
164 |
||
165 |
DESCRIPTION
|
|
166 |
Set the bitmap btree_keys, which is used when the upper layers ask
|
|
167 |
which keys to use for scanning. For each btree index the
|
|
168 |
corresponding bit is set.
|
|
169 |
||
170 |
RETURN
|
|
171 |
void
|
|
172 |
*/
|
|
173 |
||
174 |
void ha_heap::set_keys_for_scanning(void) |
|
175 |
{
|
|
176 |
btree_keys.clear_all(); |
|
177 |
for (uint i= 0 ; i < table->s->keys ; i++) |
|
178 |
{
|
|
179 |
if (table->key_info[i].algorithm == HA_KEY_ALG_BTREE) |
|
180 |
btree_keys.set_bit(i); |
|
181 |
}
|
|
182 |
}
|
|
183 |
||
184 |
||
185 |
/*
  Refresh rec_per_key statistics for all hash indexes (btree indexes
  keep their own stats). Unique hash keys get rec_per_key = 1; for
  non-unique keys we estimate records-per-bucket, clamped to >= 2 so
  the optimizer never treats a non-unique hash key as unique.
  Finally records_changed is reset and key_stat_version synchronized
  with the share, marking the statistics current.
*/
void ha_heap::update_key_stats()
{
  for (uint i= 0; i < table->s->keys; i++)
  {
    KEY *key=table->key_info+i;
    /* Skip keys without a rec_per_key array to fill in. */
    if (!key->rec_per_key)
      continue;
    if (key->algorithm != HA_KEY_ALG_BTREE)
    {
      if (key->flags & HA_NOSAME)
        key->rec_per_key[key->key_parts-1]= 1;
      else
      {
        ha_rows hash_buckets= file->s->keydef[i].hash_buckets;
        uint no_records= hash_buckets ? (uint) (file->s->records/hash_buckets) : 2;
        if (no_records < 2)
          no_records= 2;
        key->rec_per_key[key->key_parts-1]= no_records;
      }
    }
  }
  records_changed= 0;
  /* At the end of update_key_stats() we can proudly claim they are OK. */
  key_stat_version= file->s->key_stat_version;
}
|
|
210 |
||
211 |
||
212 |
/*
  Insert one row. Handles auto-set timestamp and auto_increment fields
  before calling into the heap library. After a successful write, if
  the accumulated number of changes exceeds 1/HEAP_STATS_UPDATE_THRESHOLD
  of the table, the share's key_stat_version is bumped so every handler
  refreshes its key statistics on the next info() call.
*/
int ha_heap::write_row(uchar * buf)
{
  int res;
  ha_statistic_increment(&SSV::ha_write_count);
  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
    table->timestamp_field->set_time();
  /* Only generate an auto_increment value for inserts into record[0]. */
  if (table->next_number_field && buf == table->record[0])
  {
    if ((res= update_auto_increment()))
      return res;
  }
  res= heap_write(file,buf);
  if (!res && (++records_changed*HEAP_STATS_UPDATE_THRESHOLD >
               file->s->records))
  {
    /*
      We can perform this safely since only one writer at the time is
      allowed on the table.
    */
    file->s->key_stat_version++;
  }
  return res;
}
|
|
235 |
||
236 |
/*
  Update one row. Auto-sets the ON UPDATE timestamp if configured, then
  delegates to heap_update(). On success the stats-invalidation counter
  is advanced the same way as in write_row().
*/
int ha_heap::update_row(const uchar * old_data, uchar * new_data)
{
  int res;
  ha_statistic_increment(&SSV::ha_update_count);
  if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
    table->timestamp_field->set_time();
  res= heap_update(file,old_data,new_data);
  if (!res && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD >
              file->s->records)
  {
    /*
      We can perform this safely since only one writer at the time is
      allowed on the table.
    */
    file->s->key_stat_version++;
  }
  return res;
}
|
|
254 |
||
255 |
/*
  Delete one row via heap_delete(). Statistics invalidation is skipped
  for temporary tables (tmp_table != NO_TMP_TABLE); otherwise the
  share's key_stat_version is bumped once enough rows have changed.
*/
int ha_heap::delete_row(const uchar * buf)
{
  int res;
  ha_statistic_increment(&SSV::ha_delete_count);
  res= heap_delete(file,buf);
  if (!res && table->s->tmp_table == NO_TMP_TABLE &&
      ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records)
  {
    /*
      We can perform this safely since only one writer at the time is
      allowed on the table.
    */
    file->s->key_stat_version++;
  }
  return res;
}
|
|
271 |
||
272 |
/*
  Read a row by key on the active index. Requires index_init() to have
  been called (asserted). Updates table->status for the upper layer.
*/
int ha_heap::index_read_map(uchar *buf, const uchar *key,
                            key_part_map keypart_map,
                            enum ha_rkey_function find_flag)
{
  DBUG_ASSERT(inited==INDEX);
  ha_statistic_increment(&SSV::ha_read_key_count);
  int rc= heap_rkey(file, buf, active_index, key, keypart_map, find_flag);
  table->status= rc ? STATUS_NOT_FOUND : 0;
  return rc;
}
|
|
282 |
||
283 |
/*
  Read the last row matching the given key prefix on the active index
  (fixed HA_READ_PREFIX_LAST search mode).
*/
int ha_heap::index_read_last_map(uchar *buf, const uchar *key,
                                 key_part_map keypart_map)
{
  DBUG_ASSERT(inited==INDEX);
  ha_statistic_increment(&SSV::ha_read_key_count);
  int rc= heap_rkey(file, buf, active_index, key, keypart_map,
                    HA_READ_PREFIX_LAST);
  table->status= rc ? STATUS_NOT_FOUND : 0;
  return rc;
}
|
|
293 |
||
294 |
/*
  Read a row by key on an explicitly given index number (no prior
  index_init() on that index required).
*/
int ha_heap::index_read_idx_map(uchar *buf, uint index, const uchar *key,
                                key_part_map keypart_map,
                                enum ha_rkey_function find_flag)
{
  ha_statistic_increment(&SSV::ha_read_key_count);
  int rc= heap_rkey(file, buf, index, key, keypart_map, find_flag);
  table->status= rc ? STATUS_NOT_FOUND : 0;
  return rc;
}
|
|
303 |
||
304 |
/* Advance to the next row in index order on the active index. */
int ha_heap::index_next(uchar * buf)
{
  DBUG_ASSERT(inited==INDEX);
  ha_statistic_increment(&SSV::ha_read_next_count);
  int rc= heap_rnext(file, buf);
  table->status= rc ? STATUS_NOT_FOUND : 0;
  return rc;
}
|
|
312 |
||
313 |
/* Step back to the previous row in index order on the active index. */
int ha_heap::index_prev(uchar * buf)
{
  DBUG_ASSERT(inited==INDEX);
  ha_statistic_increment(&SSV::ha_read_prev_count);
  int rc= heap_rprev(file, buf);
  table->status= rc ? STATUS_NOT_FOUND : 0;
  return rc;
}
|
|
321 |
||
322 |
/* Position on the first row of the active index. */
int ha_heap::index_first(uchar * buf)
{
  DBUG_ASSERT(inited==INDEX);
  ha_statistic_increment(&SSV::ha_read_first_count);
  int rc= heap_rfirst(file, buf, active_index);
  table->status= rc ? STATUS_NOT_FOUND : 0;
  return rc;
}
|
|
330 |
||
331 |
/* Position on the last row of the active index. */
int ha_heap::index_last(uchar * buf)
{
  DBUG_ASSERT(inited==INDEX);
  ha_statistic_increment(&SSV::ha_read_last_count);
  int rc= heap_rlast(file, buf, active_index);
  table->status= rc ? STATUS_NOT_FOUND : 0;
  return rc;
}
|
|
339 |
||
340 |
int ha_heap::rnd_init(bool scan) |
|
341 |
{
|
|
342 |
return scan ? heap_scan_init(file) : 0; |
|
343 |
}
|
|
344 |
||
345 |
/* Fetch the next row of a sequential table scan. */
int ha_heap::rnd_next(uchar *buf)
{
  ha_statistic_increment(&SSV::ha_read_rnd_next_count);
  int rc= heap_scan(file, buf);
  table->status= rc ? STATUS_NOT_FOUND : 0;
  return rc;
}
|
|
352 |
||
353 |
/*
  Fetch a row by the position previously saved via position().
  pos holds a raw HEAP_PTR; copy it out before dereferencing because
  the ref buffer is not guaranteed to be aligned for direct use here.
*/
int ha_heap::rnd_pos(uchar * buf, uchar *pos)
{
  HEAP_PTR saved_pos;
  ha_statistic_increment(&SSV::ha_read_rnd_count);
  memcpy_fixed((char*) &saved_pos, pos, sizeof(HEAP_PTR));
  int rc= heap_rrnd(file, buf, saved_pos);
  table->status= rc ? STATUS_NOT_FOUND : 0;
  return rc;
}
|
|
363 |
||
364 |
/*
  Save the current row position (a HEAP_PTR) into the handler's ref
  buffer for a later rnd_pos() call. record is unused: the heap
  library tracks the current position itself.
*/
void ha_heap::position(const uchar *record)
{
  *(HEAP_PTR*) ref= heap_position(file); // Ref is aligned
}
|
|
368 |
||
369 |
/*
  Copy table statistics from the heap library into handler::stats.
  The auto_increment value is only refreshed when HA_STATUS_AUTO is
  requested. Also the place where deferred key-statistics updates
  (requested in open() and by the key_stat_version bumps) are applied.

  RETURN
    0  always
*/
int ha_heap::info(uint flag)
{
  HEAPINFO hp_info;
  (void) heap_info(file,&hp_info,flag);

  errkey= hp_info.errkey;
  stats.records= hp_info.records;
  stats.deleted= hp_info.deleted;
  stats.mean_rec_length= hp_info.reclength;
  stats.data_file_length= hp_info.data_length;
  stats.index_file_length= hp_info.index_length;
  stats.max_data_file_length= hp_info.max_records * hp_info.reclength;
  stats.delete_length= hp_info.deleted * hp_info.reclength;
  if (flag & HA_STATUS_AUTO)
    stats.auto_increment_value= hp_info.auto_increment;
  /*
    If info() is called for the first time after open(), we will still
    have to update the key statistics. Hoping that a table lock is now
    in place.
  */
  if (key_stat_version != file->s->key_stat_version)
    update_key_stats();
  return 0;
}
|
|
393 |
||
394 |
||
395 |
/* Pass an extra-operation hint straight through to the heap library. */
int ha_heap::extra(enum ha_extra_function operation)
{
  return heap_extra(file,operation);
}
|
|
399 |
||
400 |
||
401 |
/* Reset per-statement state in the heap library between queries. */
int ha_heap::reset()
{
  return heap_reset(file);
}
|
|
405 |
||
406 |
||
407 |
int ha_heap::delete_all_rows() |
|
408 |
{
|
|
409 |
heap_clear(file); |
|
410 |
if (table->s->tmp_table == NO_TMP_TABLE) |
|
411 |
{
|
|
412 |
/*
|
|
413 |
We can perform this safely since only one writer at the time is
|
|
414 |
allowed on the table.
|
|
415 |
*/
|
|
416 |
file->s->key_stat_version++; |
|
417 |
}
|
|
418 |
return 0; |
|
419 |
}
|
|
420 |
||
421 |
/*
  HEAP relies entirely on THR_LOCK table locks (see store_lock()), so
  there is nothing to do at the external-lock level.
*/
int ha_heap::external_lock(THD *thd, int lock_type)
{
  return 0; // No external locking
}
|
|
425 |
||
426 |
||
427 |
/*
|
|
428 |
Disable indexes.
|
|
429 |
||
430 |
SYNOPSIS
|
|
431 |
disable_indexes()
|
|
432 |
mode mode of operation:
|
|
433 |
HA_KEY_SWITCH_NONUNIQ disable all non-unique keys
|
|
434 |
HA_KEY_SWITCH_ALL disable all keys
|
|
435 |
HA_KEY_SWITCH_NONUNIQ_SAVE dis. non-uni. and make persistent
|
|
436 |
HA_KEY_SWITCH_ALL_SAVE dis. all keys and make persistent
|
|
437 |
||
438 |
DESCRIPTION
|
|
439 |
Disable indexes and clear keys to use for scanning.
|
|
440 |
||
441 |
IMPLEMENTATION
|
|
442 |
HA_KEY_SWITCH_NONUNIQ is not implemented.
|
|
443 |
HA_KEY_SWITCH_NONUNIQ_SAVE is not implemented with HEAP.
|
|
444 |
HA_KEY_SWITCH_ALL_SAVE is not implemented with HEAP.
|
|
445 |
||
446 |
RETURN
|
|
447 |
0 ok
|
|
448 |
HA_ERR_WRONG_COMMAND mode not implemented.
|
|
449 |
*/
|
|
450 |
||
451 |
/*
  Disable indexes (see the mode table in the comment above). Only
  HA_KEY_SWITCH_ALL is supported for HEAP; on success the scanning-key
  bitmap is recomputed so disabled btree keys are no longer offered.

  RETURN
    0                     ok
    HA_ERR_WRONG_COMMAND  mode not implemented
*/
int ha_heap::disable_indexes(uint mode)
{
  if (mode != HA_KEY_SWITCH_ALL)
  {
    /* mode not implemented */
    return HA_ERR_WRONG_COMMAND;
  }
  int error= heap_disable_indexes(file);
  if (!error)
    set_keys_for_scanning();
  return error;
}
|
|
467 |
||
468 |
||
469 |
/*
|
|
470 |
Enable indexes.
|
|
471 |
||
472 |
SYNOPSIS
|
|
473 |
enable_indexes()
|
|
474 |
mode mode of operation:
|
|
475 |
HA_KEY_SWITCH_NONUNIQ enable all non-unique keys
|
|
476 |
HA_KEY_SWITCH_ALL enable all keys
|
|
477 |
HA_KEY_SWITCH_NONUNIQ_SAVE en. non-uni. and make persistent
|
|
478 |
HA_KEY_SWITCH_ALL_SAVE en. all keys and make persistent
|
|
479 |
||
480 |
DESCRIPTION
|
|
481 |
Enable indexes and set keys to use for scanning.
|
|
482 |
The indexes might have been disabled by disable_index() before.
|
|
483 |
The function works only if both data and indexes are empty,
|
|
484 |
since the heap storage engine cannot repair the indexes.
|
|
485 |
To be sure, call handler::delete_all_rows() before.
|
|
486 |
||
487 |
IMPLEMENTATION
|
|
488 |
HA_KEY_SWITCH_NONUNIQ is not implemented.
|
|
489 |
HA_KEY_SWITCH_NONUNIQ_SAVE is not implemented with HEAP.
|
|
490 |
HA_KEY_SWITCH_ALL_SAVE is not implemented with HEAP.
|
|
491 |
||
492 |
RETURN
|
|
493 |
0 ok
|
|
494 |
HA_ERR_CRASHED data or index is non-empty. Delete all rows and retry.
|
|
495 |
HA_ERR_WRONG_COMMAND mode not implemented.
|
|
496 |
*/
|
|
497 |
||
498 |
/*
  Enable indexes (see the mode table and preconditions in the comment
  above: data and indexes must be empty since HEAP cannot rebuild).
  Only HA_KEY_SWITCH_ALL is supported; on success the scanning-key
  bitmap is recomputed.

  RETURN
    0                     ok
    HA_ERR_CRASHED        (from heap_enable_indexes) table not empty
    HA_ERR_WRONG_COMMAND  mode not implemented
*/
int ha_heap::enable_indexes(uint mode)
{
  if (mode != HA_KEY_SWITCH_ALL)
  {
    /* mode not implemented */
    return HA_ERR_WRONG_COMMAND;
  }
  int error= heap_enable_indexes(file);
  if (!error)
    set_keys_for_scanning();
  return error;
}
|
|
514 |
||
515 |
||
516 |
/*
|
|
517 |
Test if indexes are disabled.
|
|
518 |
||
519 |
SYNOPSIS
|
|
520 |
indexes_are_disabled()
|
|
521 |
no parameters
|
|
522 |
||
523 |
RETURN
|
|
524 |
0 indexes are not disabled
|
|
525 |
1 all indexes are disabled
|
|
526 |
[2 non-unique indexes are disabled - NOT YET IMPLEMENTED]
|
|
527 |
*/
|
|
528 |
||
529 |
/* See comment above: 0 = enabled, 1 = all disabled. */
int ha_heap::indexes_are_disabled(void)
{
  return heap_indexes_are_disabled(file);
}
|
|
533 |
||
534 |
/*
  Register this table's THR_LOCK in the statement's lock array.
  The requested lock type is only recorded when the table is currently
  unlocked (TL_UNLOCK) and the caller is not just probing (TL_IGNORE),
  so an already-held lock is never downgraded mid-statement.
*/
THR_LOCK_DATA **ha_heap::store_lock(THD *thd,
                                    THR_LOCK_DATA **to,
                                    enum thr_lock_type lock_type)
{
  if (lock_type != TL_IGNORE && file->lock.type == TL_UNLOCK)
    file->lock.type=lock_type;
  *to++= &file->lock;
  return to;
}
|
|
543 |
||
544 |
/*
|
|
545 |
We have to ignore ENOENT entries as the HEAP table is created on open and
|
|
546 |
not when doing a CREATE on the table.
|
|
547 |
*/
|
|
548 |
||
549 |
int ha_heap::delete_table(const char *name) |
|
550 |
{
|
|
551 |
int error= heap_delete_table(name); |
|
552 |
return error == ENOENT ? 0 : error; |
|
553 |
}
|
|
554 |
||
555 |
||
556 |
/*
  Drop an open table: mark the share for deletion when the last user
  closes it, then close our own handle.
*/
void ha_heap::drop_table(const char *name)
{
  file->s->delete_on_close= 1;
  close();
}
|
|
561 |
||
562 |
||
563 |
/* Rename the in-memory table; delegated to the heap library. */
int ha_heap::rename_table(const char * from, const char * to)
{
  return heap_rename(from,to);
}
|
|
567 |
||
568 |
||
569 |
/*
  Estimate the number of rows in a key range for the optimizer.
  Btree indexes support real range estimates; hash indexes can only
  answer exact full-key lookups (min inclusive, max exclusive over the
  same full-length key), for which the cached rec_per_key estimate is
  returned. Anything else yields HA_POS_ERROR ("no idea").
*/
ha_rows ha_heap::records_in_range(uint inx, key_range *min_key,
                                  key_range *max_key)
{
  KEY *key=table->key_info+inx;
  if (key->algorithm == HA_KEY_ALG_BTREE)
    return hp_rb_records_in_range(file, inx, min_key, max_key);

  if (!min_key || !max_key ||
      min_key->length != max_key->length ||
      min_key->length != key->key_length ||
      min_key->flag != HA_READ_KEY_EXACT ||
      max_key->flag != HA_READ_AFTER_KEY)
    return HA_POS_ERROR; // Can only use exact keys

  if (stats.records <= 1)
    return stats.records;

  /* Assert that info() did run. We need current statistics here. */
  DBUG_ASSERT(key_stat_version == file->s->key_stat_version);
  return key->rec_per_key[key->key_parts-1];
}
|
|
590 |
||
591 |
||
592 |
/*
  Create (materialize) a HEAP table.

  Translates the server-side key definitions (KEY/KEY_PART_INFO) into
  the heap library's HP_KEYDEF/HA_KEYSEG arrays, accounts the per-row
  memory cost of each index to derive max_rows from
  max_heap_table_size, detects the auto_increment key, and finally
  calls heap_create(). The resulting share is stored in
  internal_share; 'file' stays 0 — the table is opened separately
  (see ha_heap::open()).

  RETURN
    0         ok
    my_errno  allocation failed
    != 0      error from heap_create()
*/
int ha_heap::create(const char *name, TABLE *table_arg,
                    HA_CREATE_INFO *create_info)
{
  uint key, parts, mem_per_row= 0, keys= table_arg->s->keys;
  uint auto_key= 0, auto_key_type= 0;
  ha_rows max_rows;
  HP_KEYDEF *keydef;
  HA_KEYSEG *seg;
  int error;
  TABLE_SHARE *share= table_arg->s;
  bool found_real_auto_increment= 0;

  /* Count total key parts to size the single combined allocation. */
  for (key= parts= 0; key < keys; key++)
    parts+= table_arg->key_info[key].key_parts;

  /* One allocation: keydef array followed by all key segments. */
  if (!(keydef= (HP_KEYDEF*) my_malloc(keys * sizeof(HP_KEYDEF) +
                                       parts * sizeof(HA_KEYSEG),
                                       MYF(MY_WME))))
    return my_errno;
  seg= my_reinterpret_cast(HA_KEYSEG*) (keydef + keys);
  for (key= 0; key < keys; key++)
  {
    KEY *pos= table_arg->key_info+key;
    KEY_PART_INFO *key_part= pos->key_part;
    KEY_PART_INFO *key_part_end= key_part + pos->key_parts;

    keydef[key].keysegs= (uint) pos->key_parts;
    keydef[key].flag= (pos->flags & (HA_NOSAME | HA_NULL_ARE_EQUAL));
    keydef[key].seg= seg;

    /* Per-index memory overhead added to the per-row cost estimate. */
    switch (pos->algorithm) {
    case HA_KEY_ALG_UNDEF:
    case HA_KEY_ALG_HASH:
      keydef[key].algorithm= HA_KEY_ALG_HASH;
      mem_per_row+= sizeof(char*) * 2; // = sizeof(HASH_INFO)
      break;
    case HA_KEY_ALG_BTREE:
      keydef[key].algorithm= HA_KEY_ALG_BTREE;
      mem_per_row+=sizeof(TREE_ELEMENT)+pos->key_length+sizeof(char*);
      break;
    default:
      DBUG_ASSERT(0); // cannot happen
    }

    for (; key_part != key_part_end; key_part++, seg++)
    {
      Field *field= key_part->field;

      if (pos->algorithm == HA_KEY_ALG_BTREE)
        seg->type= field->key_type();
      else
      {
        /* Hash keys compare everything binary except text/var types. */
        if ((seg->type = field->key_type()) != (int) HA_KEYTYPE_TEXT &&
            seg->type != HA_KEYTYPE_VARTEXT1 &&
            seg->type != HA_KEYTYPE_VARTEXT2 &&
            seg->type != HA_KEYTYPE_VARBINARY1 &&
            seg->type != HA_KEYTYPE_VARBINARY2)
          seg->type= HA_KEYTYPE_BINARY;
      }
      seg->start= (uint) key_part->offset;
      seg->length= (uint) key_part->length;
      seg->flag= key_part->key_part_flag;

      /* ENUM/SET compare by their internal numeric value, not collation. */
      if (field->flags & (ENUM_FLAG | SET_FLAG))
        seg->charset= &my_charset_bin;
      else
        seg->charset= field->charset();
      if (field->null_ptr)
      {
        seg->null_bit= field->null_bit;
        seg->null_pos= (uint) (field->null_ptr - (uchar*) table_arg->record[0]);
      }
      else
      {
        seg->null_bit= 0;
        seg->null_pos= 0;
      }
      if (field->flags & AUTO_INCREMENT_FLAG &&
          table_arg->found_next_number_field &&
          key == share->next_number_index)
      {
        /*
          Store key number and type for found auto_increment key
          We have to store type as seg->type can differ from it
        */
        auto_key= key+ 1;
        auto_key_type= field->key_type();
      }
    }
  }
  mem_per_row+= MY_ALIGN(share->reclength + 1, sizeof(char*));
  max_rows = (ha_rows) (table_arg->in_use->variables.max_heap_table_size /
                        (uint64_t) mem_per_row);
  if (table_arg->found_next_number_field)
  {
    keydef[share->next_number_index].flag|= HA_AUTO_KEY;
    /* "Real" only when the auto_increment column is the first key part. */
    found_real_auto_increment= share->next_number_key_offset == 0;
  }
  HP_CREATE_INFO hp_create_info;
  hp_create_info.auto_key= auto_key;
  hp_create_info.auto_key_type= auto_key_type;
  hp_create_info.auto_increment= (create_info->auto_increment_value ?
                                  create_info->auto_increment_value - 1 : 0);
  hp_create_info.max_table_size=current_thd->variables.max_heap_table_size;
  hp_create_info.with_auto_increment= found_real_auto_increment;
  hp_create_info.internal_table= internal_table;
  /* Recompute max_rows from the limit actually passed to heap_create(). */
  max_rows = (ha_rows) (hp_create_info.max_table_size / mem_per_row);
  error= heap_create(name,
                     keys, keydef, share->reclength,
                     (ulong) ((share->max_rows < max_rows &&
                               share->max_rows) ?
                              share->max_rows : max_rows),
                     (ulong) share->min_rows, &hp_create_info, &internal_share);
  my_free((uchar*) keydef, MYF(0));
  DBUG_ASSERT(file == 0);
  return (error);
}
|
|
709 |
||
710 |
||
711 |
/*
  Fill in create_info for SHOW CREATE TABLE / ALTER: refresh the
  auto_increment statistic and report it unless the user supplied an
  explicit AUTO_INCREMENT value.
*/
void ha_heap::update_create_info(HA_CREATE_INFO *create_info)
{
  table->file->info(HA_STATUS_AUTO);
  if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
    create_info->auto_increment_value= stats.auto_increment_value;
}
|
|
717 |
||
718 |
/*
  Reserve auto_increment values. HEAP uses table-level locking only,
  so the handler can claim the entire remaining range (ULONGLONG_MAX)
  starting from the current auto_increment value.
*/
void ha_heap::get_auto_increment(uint64_t offset, uint64_t increment,
                                 uint64_t nb_desired_values,
                                 uint64_t *first_value,
                                 uint64_t *nb_reserved_values)
{
  ha_heap::info(HA_STATUS_AUTO);
  *first_value= stats.auto_increment_value;
  /* such table has only table-level locking so reserves up to +inf */
  *nb_reserved_values= ULONGLONG_MAX;
}
|
|
728 |
||
729 |
||
730 |
/*
  Decide whether a fast in-place ALTER is possible.
  Any explicit auto_increment change, a structurally different table
  definition (IS_EQUAL_NO), or a pack-length change (not implemented
  for HEAP) forces a full copy.
*/
bool ha_heap::check_if_incompatible_data(HA_CREATE_INFO *info,
                                         uint table_changes)
{
  /* Check that auto_increment value was not changed */
  if ((info->used_fields & HA_CREATE_USED_AUTO) &&
      info->auto_increment_value != 0)
    return COMPATIBLE_DATA_NO;
  if (table_changes == IS_EQUAL_NO)
    return COMPATIBLE_DATA_NO;
  if (table_changes & IS_EQUAL_PACK_LENGTH) // Not implemented yet
    return COMPATIBLE_DATA_NO;
  return COMPATIBLE_DATA_YES;
}
|
|
741 |
||
742 |
/* Engine descriptor consumed by the plugin framework. */
struct st_mysql_storage_engine heap_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };

/*
  Plugin registration: exposes the engine as "MEMORY" with heap_init
  as its init hook and no deinit, status, or system variables.
*/
mysql_declare_plugin(heap)
{
  MYSQL_STORAGE_ENGINE_PLUGIN,
  &heap_storage_engine,
  "MEMORY",
  "MySQL AB",
  "Hash based, stored in memory, useful for temporary tables",
  PLUGIN_LICENSE_GPL,
  heap_init,
  NULL,                       /* no deinit */
  0x0100, /* 1.0 */
  NULL, /* status variables */
  NULL, /* system variables */
  NULL /* config options */
}
mysql_declare_plugin_end;