  only. And it is sufficient to calculate the checksum once only.
*/

#include "myisam_priv.h"

#include <drizzled/internal/m_string.h>
#include <stdarg.h>
#include <mysys/my_getopt.h>
#ifdef HAVE_SYS_VADVISE_H
#include <sys/vadvise.h>
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#include <drizzled/util/test.h>
#include <drizzled/error.h>

using namespace drizzled;
using namespace drizzled::internal;

#define my_off_t2double(A) ((double) (my_off_t) (A))

#define my_raid_create(A,B,C,D,E,F,G) my_create(A,B,C,G)
#define my_raid_delete(A,B,C) my_delete(A,B)

/* Functions defined in this file */

static int check_k_link(MI_CHECK *param, MI_INFO *info, uint32_t nr);
static int chk_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo,
                     my_off_t page, unsigned char *buff, ha_rows *keys,
                     ha_checksum *key_checksum, uint32_t level);
static uint32_t isam_key_length(MI_INFO *info, MI_KEYDEF *keyinfo);
static ha_checksum calc_checksum(ha_rows count);
static int writekeys(MI_SORT_PARAM *sort_param);
static int sort_one_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo,
                          my_off_t pagepos, int new_file);
int sort_key_read(MI_SORT_PARAM *sort_param, void *key);
int sort_get_next_record(MI_SORT_PARAM *sort_param);
int sort_key_cmp(MI_SORT_PARAM *sort_param, const void *a, const void *b);
int sort_key_write(MI_SORT_PARAM *sort_param, const void *a);
my_off_t get_record_for_key(MI_INFO *info, MI_KEYDEF *keyinfo,
                            unsigned char *key);
int sort_insert_key(MI_SORT_PARAM *sort_param,
                    register SORT_KEY_BLOCKS *key_block,
                    unsigned char *key, my_off_t prev_block);
int sort_delete_record(MI_SORT_PARAM *sort_param);
/*static int flush_pending_blocks(MI_CHECK *param);*/
static SORT_KEY_BLOCKS *alloc_key_blocks(MI_CHECK *param, uint32_t blocks,
                                         uint32_t buffer_length);
static ha_checksum mi_byte_checksum(const unsigned char *buf, uint32_t length);
static void set_data_file_type(SORT_INFO *sort_info, MYISAM_SHARE *share);


void myisamchk_init(MI_CHECK *param)
{
  memset(param, 0, sizeof(*param));
  param->opt_follow_links=1;
  param->keys_in_use= ~(uint64_t) 0;
  param->search_after_block=HA_OFFSET_ERROR;
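
/*
  The code below is the error-handling/cleanup tail of the repair-by-sort
  path (mi_repair_by_sort() in the upstream source): flush the key cache
  blocks, shut down the I/O caches, swap the rebuilt temporary data file in
  for the old one, and release the sort buffers.
*/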

  memcpy(&share->state.state, info->state, sizeof(*info->state));

  got_error|= flush_blocks(param, share->getKeyCache(), share->kfile);
  info->rec_cache.end_io_cache();

  /* Replace the actual file with the temporary file */
  internal::my_close(new_file,MYF(0));
  info->dfile=new_file= -1;
  if (change_to_newfile(share->data_file_name,MI_NAME_DEXT,
                        DATA_TMP_EXT, share->base.raid_chunks,
                        (param->testflag & T_BACKUP_DATA ?
                         MYF(MY_REDEL_MAKE_BACKUP): MYF(0))) ||
      mi_open_datafile(info,share,-1))

  if (! param->error_printed)
    mi_check_print_error(param,"%d when fixing table",errno);
  internal::my_close(new_file,MYF(0));
  my_delete(param->temp_filename, MYF(MY_WME));
  if (info->dfile == new_file)

  mi_mark_crashed_on_repair(info);
  else if (key_map == share->state.key_map)
    share->state.changed&= ~STATE_NOT_OPTIMIZED_KEYS;
  share->state.changed|=STATE_NOT_SORTED_PAGES;

  void *rec_buff_ptr= NULL;
  rec_buff_ptr= mi_get_rec_buff_ptr(info, sort_param.rec_buff);
  if (rec_buff_ptr != NULL)
    free(rec_buff_ptr);
  rec_buff_ptr= mi_get_rec_buff_ptr(info, sort_param.record);
  if (rec_buff_ptr != NULL)
    free(rec_buff_ptr);

  free((unsigned char*) sort_info.key_block);
  free(sort_info.buff);
  param->read_cache.end_io_cache();
  info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
  if (!got_error && (param->testflag & T_UNPACK))
  {
    share->state.header.options[0]&= (unsigned char) ~HA_OPTION_COMPRESS_RECORD;
    share->pack.header_length=0;
  }


/*
  Threaded repair of table using sorting

  SYNOPSIS
    mi_repair_parallel()
    param       Repair parameters
    info        MyISAM handler to repair
    name        Name of table (for warnings)
    rep_quick   set to <> 0 if we should not change the data file

  DESCRIPTION
    Same as mi_repair_by_sort, but multithreaded: each key is handled by a
    separate thread.
    TODO: make the number of threads a parameter.

    In parallel repair we use one thread per index. There are two modes:

    Quick

      Only the indexes are rebuilt. All threads share a read buffer.
      Every thread that needs fresh data in the buffer enters the shared
      cache lock. The last thread joining the lock reads the buffer from
      the data file and wakes all other threads.

    Non-quick

      The data file is rebuilt and all indexes are rebuilt to point to
      the new record positions. One thread is the master thread. It
      reads from the old data file and writes to the new data file. It
      also creates one of the indexes. The other threads read from a
      buffer which is filled by the master. If they need fresh data,
      they enter the shared cache lock. If the master's write buffer is
      full, it flushes it to the new data file and enters the shared
      cache lock too. When all threads have joined in the lock, the master
      copies its write buffer to the read buffer for the other threads
      and wakes them.
*/

int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info,
                       const char * name, int rep_quick)
{
  uint i,key, total_key_length, istep;
  ha_rows start_records;
  my_off_t new_header_length,del;
  MI_SORT_PARAM *sort_param=0;
  MYISAM_SHARE *share=info->s;
  ulong *rec_per_key_part;
  IO_CACHE new_data_cache; /* For non-quick repair. */
  IO_CACHE_SHARE io_share;
  SORT_INFO sort_info;
  uint64_t key_map= 0;
  pthread_attr_t thr_attr;
  ulong max_pack_reclength;

  start_records=info->state->records;
  new_header_length=(param->testflag & T_UNPACK) ? 0 :
    share->pack.header_length;
  if (!(param->testflag & T_SILENT))
  {
    printf("- parallel recovering (with sort) MyISAM-table '%s'\n",name);
    printf("Data records: %s\n", llstr(start_records,llbuff));
  }
  param->testflag|=T_REP; /* for easy checking */

  if (info->s->options & (HA_OPTION_CHECKSUM | HA_OPTION_COMPRESS_RECORD))
    param->testflag|=T_CALC_CHECKSUM;

  /*
    Quick repair (not touching data file, rebuilding indexes):
      Read cache is (MI_CHECK *param)->read_cache using info->dfile.

    Non-quick repair (rebuilding data file and indexes):
      Master thread:
        Read cache is (MI_CHECK *param)->read_cache using info->dfile.
        Write cache is (MI_INFO *info)->rec_cache using new_file.
      Slave threads:
        Read cache is new_data_cache synced to master rec_cache.

    The final assignment of the filedescriptor for rec_cache is done
    after the cache creation.

    Don't check file size on new_data_cache, as the resulting file size
    is not known yet.

    As rec_cache and new_data_cache are synced, write_buffer_length is
    used for the read cache 'new_data_cache'. Both start at the same
    position 'new_header_length'.
  */

  memset((char*)&sort_info, 0, sizeof(sort_info));
  /* Initialize pthread structures before goto err. */
  pthread_mutex_init(&sort_info.mutex, MY_MUTEX_INIT_FAST);
  pthread_cond_init(&sort_info.cond, 0);

  if (!(sort_info.key_block=
        alloc_key_blocks(param, (uint) param->sort_key_blocks,
                         share->base.max_key_block_length)) ||
      init_io_cache(&param->read_cache, info->dfile,
                    (uint) param->read_buffer_length,
                    READ_CACHE, share->pack.header_length, 1, MYF(MY_WME)) ||
      (!rep_quick &&
       (init_io_cache(&info->rec_cache, info->dfile,
                      (uint) param->write_buffer_length,
                      WRITE_CACHE, new_header_length, 1,
                      MYF(MY_WME | MY_WAIT_IF_FULL) & param->myf_rw) ||
        init_io_cache(&new_data_cache, -1,
                      (uint) param->write_buffer_length,
                      READ_CACHE, new_header_length, 1,
                      MYF(MY_WME | MY_DONT_CHECK_FILESIZE)))))
    goto err;
  sort_info.key_block_end=sort_info.key_block+param->sort_key_blocks;
  info->opt_flag|=WRITE_CACHE_USED;
  info->rec_cache.file=info->dfile;   /* for sort_delete_record */
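  /*
    Note: the WRITE_CACHE flags above are masked with param->myf_rw, so any
    flag the caller did not enable in myf_rw (e.g. MY_WAIT_IF_FULL or MY_WME)
    is dropped for the repair write cache.
  */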

  /* Get real path for data file */
  if ((new_file=my_raid_create(fn_format(param->temp_filename,
                                         share->data_file_name, "",
                               0,param->tmpfile_createflag,
                               share->base.raid_type,
                               share->base.raid_chunks,
                               share->base.raid_chunksize,

    mi_check_print_error(param,"Can't create new tempfile: '%s'",
                         param->temp_filename);

  if (new_header_length &&
      filecopy(param, new_file,info->dfile,0L,new_header_length,

  if (param->testflag & T_UNPACK)
  {
    share->options&= ~HA_OPTION_COMPRESS_RECORD;
    mi_int2store(share->state.header.options,share->options);
  }
  share->state.dellink= HA_OFFSET_ERROR;
  info->rec_cache.file=new_file;

  info->update= (short) (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);

  /* Optionally drop indexes and optionally modify the key_map. */
  mi_drop_all_indexes(param, info, false);
  key_map= share->state.key_map;
  if (param->testflag & T_CREATE_MISSING_KEYS)
  {
    /* Invert the copied key_map to recreate all disabled indexes. */
    key_map= ~key_map;
  }

  sort_info.info=info;
  sort_info.param= param;

  set_data_file_type(&sort_info, share);

  param->read_cache.end_of_file=sort_info.filelength=
    my_seek(param->read_cache.file,0L,MY_SEEK_END,MYF(0));

  if (share->data_file_type == DYNAMIC_RECORD)
    rec_length=max(share->base.min_pack_length+1,share->base.min_block_length);
  else if (share->data_file_type == COMPRESSED_RECORD)
    rec_length=share->base.min_block_length;
  else
    rec_length=share->base.pack_reclength;
  /*
    +1 below is a required hack for parallel repair mode.
    The info->state->records value, which is compared later
    to sort_info.max_records and cannot exceed it, is
    increased in sort_key_write. In mi_repair_by_sort, sort_key_write
    is called after sort_key_read, where the comparison is performed,
    but in parallel mode the master thread can call sort_key_write
    before some other repair thread calls sort_key_read.
    Furthermore I'm not even sure +1 would be enough.
    Maybe sort_info.max_records should always be set to the maximum value
    in this case.
  */
  sort_info.max_records=
    ((param->testflag & T_CREATE_MISSING_KEYS) ? info->state->records + 1:
     (ha_rows) (sort_info.filelength/rec_length+1));
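  /*
    Illustration (assumed numbers): a 1,000,000-byte data file with
    rec_length 20 gives max_records = 1,000,000 / 20 + 1 = 50,001; with
    T_CREATE_MISSING_KEYS the current record count plus one is used instead.
  */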

  del=info->state->del;

  /* for compressed tables */
  max_pack_reclength= share->base.pack_reclength;
  if (share->options & HA_OPTION_COMPRESS_RECORD)
    set_if_bigger(max_pack_reclength, share->max_pack_length);
  if (!(sort_param=(MI_SORT_PARAM *)
        my_malloc((uint) share->base.keys *
                  (sizeof(MI_SORT_PARAM) + max_pack_reclength),

    mi_check_print_error(param,"Not enough memory for key!");

  rec_per_key_part= param->rec_per_key_part;
  info->state->records=info->state->del=share->state.split=0;
  info->state->empty=0;

  for (i=key=0, istep=1 ; key < share->base.keys ;
       rec_per_key_part+=sort_param[i].keyinfo->keysegs, i+=istep, key++)
  {
    sort_param[i].key=key;
    sort_param[i].keyinfo=share->keyinfo+key;
    sort_param[i].seg=sort_param[i].keyinfo->seg;
    /*
      Skip this index if it is marked disabled in the copied
      (and possibly inverted) key_map.
    */
    if (! mi_is_key_active(key_map, key))
    {
      /* Remember old statistics for key */
      memcpy((char*) rec_per_key_part,
             (char*) (share->state.rec_per_key_part+
                      (uint) (rec_per_key_part - param->rec_per_key_part)),
             sort_param[i].keyinfo->keysegs*sizeof(*rec_per_key_part));
    }

    if ((!(param->testflag & T_SILENT)))
      printf ("- Fixing index %d\n",key+1);

    sort_param[i].key_read=sort_key_read;
    sort_param[i].key_write=sort_key_write;
    sort_param[i].key_cmp=sort_key_cmp;
    sort_param[i].lock_in_memory=lock_memory;
    sort_param[i].tmpdir=param->tmpdir;
    sort_param[i].sort_info=&sort_info;
    sort_param[i].master=0;
    sort_param[i].fix_datafile=0;
    sort_param[i].calc_checksum= 0;

    sort_param[i].filepos=new_header_length;
    sort_param[i].max_pos=sort_param[i].pos=share->pack.header_length;

    sort_param[i].record= (((uchar *)(sort_param+share->base.keys))+
                           (max_pack_reclength * i));
    if (!mi_alloc_rec_buff(info, -1, &sort_param[i].rec_buff))
      mi_check_print_error(param,"Not enough memory!");

    sort_param[i].key_length=share->rec_reflength;
    for (keyseg=sort_param[i].seg; keyseg->type != HA_KEYTYPE_END;
         keyseg++)
    {
      sort_param[i].key_length+=keyseg->length;
      if (keyseg->flag & HA_SPACE_PACK)
        sort_param[i].key_length+=get_pack_length(keyseg->length);
      if (keyseg->flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART))
        sort_param[i].key_length+=2 + test(keyseg->length >= 127);
      if (keyseg->flag & HA_NULL_PART)
        sort_param[i].key_length++;
    }
    total_key_length+=sort_param[i].key_length;
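    /*
      Illustration (assumed key layout): with rec_reflength 4 and a single
      nullable VARCHAR key part of length 100, key_length becomes
      4 + 100 + 2 + 1 = 107 bytes per sort key.
    */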
  }

  sort_info.total_keys=i;
  sort_param[0].master= 1;
  sort_param[0].fix_datafile= (my_bool)(! rep_quick);
  sort_param[0].calc_checksum= test(param->testflag & T_CALC_CHECKSUM);

  sort_info.got_error=0;
  pthread_mutex_lock(&sort_info.mutex);

  /*
    Initialize the I/O cache share for use with the read caches and, in
    case of non-quick repair, the write cache. When all threads join on
    the cache lock, the writer copies the write cache contents to the
    read caches.
  */
  init_io_cache_share(&param->read_cache, &io_share, NULL, i);

  init_io_cache_share(&new_data_cache, &io_share, &info->rec_cache, i);

  io_share.total_threads= 0;   /* share not used */

  (void) pthread_attr_init(&thr_attr);
  (void) pthread_attr_setdetachstate(&thr_attr,PTHREAD_CREATE_DETACHED);

  for (i=0 ; i < sort_info.total_keys ; i++)
  {
    /*
      Copy the properly initialized IO_CACHE structure so that every
      thread has its own copy. In quick mode param->read_cache is shared
      for use by all threads. In non-quick mode all threads but the
      first copy the shared new_data_cache, which is synchronized to the
      write cache of the first thread. The first thread copies
      param->read_cache, which is not shared.
    */
    sort_param[i].read_cache= ((rep_quick || !i) ? param->read_cache :
                               new_data_cache);

    /*
      Two approaches: the same amount of memory for each thread, or memory
      for the same number of keys for each thread...
      In the second one all the threads will fill their sort_buffers
      (and call write_keys) at the same time, putting more stress on i/o.
    */
    sort_param[i].sortbuff_size=
#ifndef USING_SECOND_APPROACH
      param->sort_buffer_length/sort_info.total_keys;
#else
      param->sort_buffer_length*sort_param[i].key_length/total_key_length;
#endif
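    /*
      Illustration (assumed sizes): with sort_buffer_length of 16 MB and four
      keys, the first approach gives each thread 4 MB; under the second, a
      key contributing half of total_key_length would get 8 MB.
    */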

    if (pthread_create(&sort_param[i].thr, &thr_attr,
                       thr_find_all_keys,
                       (void *) (sort_param+i)))
    {
      mi_check_print_error(param,"Cannot start a repair thread");
      /* Cleanup: detach from the share so that the others are not blocked. */
      if (io_share.total_threads)
        remove_io_thread(&sort_param[i].read_cache);
      sort_info.got_error=1;
    }
    else
      sort_info.threads_running++;
  }
  (void) pthread_attr_destroy(&thr_attr);

  /* Wait for all threads to finish. */
  while (sort_info.threads_running)
    pthread_cond_wait(&sort_info.cond, &sort_info.mutex);
  pthread_mutex_unlock(&sort_info.mutex);
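
  /*
    All worker threads have finished at this point; thr_write_keys() below
    merges the per-key sort results into the index file.
  */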

  if ((got_error= thr_write_keys(sort_param)))
  {
    param->retry_repair=1;
    goto err;
  }
  got_error=1;   /* Assume the following may go wrong */

  if (sort_param[0].fix_datafile)
  {
    /*
      Append some nuls to the end of a memory mapped file. Destroy the
      write cache. The master thread did already detach from the share
      by remove_io_thread() in sort.c:thr_find_all_keys().
    */
    if (write_data_suffix(&sort_info,1) || end_io_cache(&info->rec_cache))
      goto err;
    if (param->testflag & T_SAFE_REPAIR)
    {
      /* Don't repair if we lost more than one row */
      if (info->state->records+1 < start_records)
      {
        info->state->records=start_records;
        goto err;
      }
    }

    share->state.state.data_file_length= info->state->data_file_length=
      sort_param->filepos;
    /* Only whole records */
    share->state.version=(ulong) time((time_t*) 0);

    /*
      Exchange the data file descriptor of the table, so that we use the
      new file from now on.
    */
    my_close(info->dfile,MYF(0));
    info->dfile=new_file;

    share->data_file_type=sort_info.new_data_file_type;
    share->pack.header_length=(ulong) new_header_length;
  }
  else
    info->state->data_file_length=sort_param->max_pos;

  if (rep_quick && del+sort_info.dupp != info->state->del)
  {
    mi_check_print_error(param,"Couldn't fix table with quick recovery: Found wrong number of deleted records");
    mi_check_print_error(param,"Run recovery again without -q");
    param->retry_repair=1;
    param->testflag|=T_RETRY_WITHOUT_QUICK;
    goto err;
  }
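
  /*
    The block below computes the wanted data file size (skr): the new
    data_file_length plus the mmap safety margin for compressed tables, but
    never less than reloc * min_pack_length for static-record tables. The
    data file is truncated to that size when it differs from the measured
    file length.
  */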

  if (rep_quick & T_FORCE_UNIQUENESS)
  {
    my_off_t skr=info->state->data_file_length+
      (share->options & HA_OPTION_COMPRESS_RECORD ?
       MEMMAP_EXTRA_MARGIN : 0);

    if (share->data_file_type == STATIC_RECORD &&
        skr < share->base.reloc*share->base.min_pack_length)
      skr=share->base.reloc*share->base.min_pack_length;

    if (skr != sort_info.filelength && !info->s->base.raid_type)
      if (ftruncate(info->dfile, skr))
        mi_check_print_warning(param,
                               "Can't change size of datafile, error: %d",
                               my_errno);
  }

  if (param->testflag & T_CALC_CHECKSUM)
    info->state->checksum=param->glob_crc;

  if (ftruncate(share->kfile, info->state->key_file_length))
    mi_check_print_warning(param,
                           "Can't change size of indexfile, error: %d", my_errno);

  if (!(param->testflag & T_SILENT))
  {
    if (start_records != info->state->records)
      printf("Data records: %s\n", llstr(info->state->records,llbuff));
    if (sort_info.dupp)
      mi_check_print_warning(param,
                             "%s records have been removed",
                             llstr(sort_info.dupp,llbuff));
  }

  if (&share->state.state != info->state)
    memcpy(&share->state.state, info->state, sizeof(*info->state));

err:
  got_error|= flush_blocks(param, share->key_cache, share->kfile);
  /*
    Destroy the write cache. The master thread did already detach from
    the share by remove_io_thread() or it was not yet started (if the
    error happened before creating the thread).
  */
  VOID(end_io_cache(&info->rec_cache));
  /*
    Destroy the new data cache in case of non-quick repair. All slave
    threads did either detach from the share by remove_io_thread()
    already or they were not yet started (if the error happened before
    creating the threads).
  */
  VOID(end_io_cache(&new_data_cache));

  /* Replace the actual file with the temporary file */
  my_close(new_file,MYF(0));
  info->dfile=new_file= -1;
  if (change_to_newfile(share->data_file_name,MI_NAME_DEXT,
                        DATA_TMP_EXT, share->base.raid_chunks,
                        (param->testflag & T_BACKUP_DATA ?
                         MYF(MY_REDEL_MAKE_BACKUP): MYF(0))) ||
      mi_open_datafile(info,share,-1))

  if (! param->error_printed)
    mi_check_print_error(param,"%d when fixing table",my_errno);
  VOID(my_close(new_file,MYF(0)));
  VOID(my_raid_delete(param->temp_filename,share->base.raid_chunks,

  if (info->dfile == new_file)

  mi_mark_crashed_on_repair(info);
  else if (key_map == share->state.key_map)
    share->state.changed&= ~STATE_NOT_OPTIMIZED_KEYS;
  share->state.changed|=STATE_NOT_SORTED_PAGES;

  pthread_cond_destroy (&sort_info.cond);
  pthread_mutex_destroy(&sort_info.mutex);

  my_free((uchar*) sort_info.key_block,MYF(MY_ALLOW_ZERO_PTR));
  my_free((uchar*) sort_param,MYF(MY_ALLOW_ZERO_PTR));
  my_free(sort_info.buff,MYF(MY_ALLOW_ZERO_PTR));
  VOID(end_io_cache(&param->read_cache));
  info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
  if (!got_error && (param->testflag & T_UNPACK))
  {
    share->state.header.options[0]&= (uchar) ~HA_OPTION_COMPRESS_RECORD;
    share->pack.header_length=0;
  }
  return(got_error);
}
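
  /*
    Heuristic used below: the table is considered too big for this repair
    method when either the index file or the data file has grown past 90%
    of its configured maximum length.
  */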
  if (info->s->options & HA_OPTION_COMPRESS_RECORD)

  return (my_off_t)(lseek(info->s->kfile, 0L, SEEK_END) / 10 * 9) >
           (my_off_t) info->s->base.max_key_file_length ||
         (my_off_t)(lseek(info->dfile, 0L, SEEK_END) / 10 * 9) >
           (my_off_t) info->s->base.max_data_file_length;


/* Recreate table with bigger, more generously allocated record data */

int recreate_table(MI_CHECK *param, MI_INFO **org_info, char *filename)
{
  MI_KEYDEF *keyinfo,*key,*key_end;
  HA_KEYSEG *keysegs,*keyseg;
  MI_COLUMNDEF *recdef,*rec,*end;
  MI_UNIQUEDEF *uniquedef,*u_ptr,*u_end;
  MI_STATUS_INFO status_info;
  uint unpack,key_parts;
  ha_rows max_records;
  uint64_t file_length,tmp_length;
  MI_CREATE_INFO create_info;

  error=1;   /* Default error */
  status_info= (*org_info)->state[0];
  info.state= &status_info;
  share= *(*org_info)->s;
  unpack= (share.options & HA_OPTION_COMPRESS_RECORD) &&
          (param->testflag & T_UNPACK);
  if (!(keyinfo=(MI_KEYDEF*) my_alloca(sizeof(MI_KEYDEF)*share.base.keys)))

  memcpy((uchar*) keyinfo,(uchar*) share.keyinfo,
         (size_t) (sizeof(MI_KEYDEF)*share.base.keys));

  key_parts= share.base.all_key_parts;
  if (!(keysegs=(HA_KEYSEG*) my_alloca(sizeof(HA_KEYSEG)*
                                       (key_parts+share.base.keys))))
    my_afree((uchar*) keyinfo);

  if (!(recdef=(MI_COLUMNDEF*)
        my_alloca(sizeof(MI_COLUMNDEF)*(share.base.fields+1))))
  {
    my_afree((uchar*) keyinfo);
    my_afree((uchar*) keysegs);
  }
  if (!(uniquedef=(MI_UNIQUEDEF*)
        my_alloca(sizeof(MI_UNIQUEDEF)*(share.state.header.uniques+1))))
  {
    my_afree((uchar*) recdef);
    my_afree((uchar*) keyinfo);
    my_afree((uchar*) keysegs);
  }

  /* Copy the column definitions */
  memcpy((uchar*) recdef,(uchar*) share.rec,
         (size_t) (sizeof(MI_COLUMNDEF)*(share.base.fields+1)));
  for (rec=recdef,end=recdef+share.base.fields; rec != end ; rec++)
  {
    if (unpack && !(share.options & HA_OPTION_PACK_RECORD) &&
        rec->type != FIELD_BLOB &&
        rec->type != FIELD_VARCHAR &&
        rec->type != FIELD_CHECK)
      rec->type=(int) FIELD_NORMAL;
  }

  /* Change the new key to point at the saved key segments */
  memcpy((uchar*) keysegs,(uchar*) share.keyparts,
         (size_t) (sizeof(HA_KEYSEG)*(key_parts+share.base.keys+
                                      share.state.header.uniques)));
  for (key=keyinfo,key_end=keyinfo+share.base.keys; key != key_end ; key++)
  {
    for (; keyseg->type ; keyseg++)
    {
      if (param->language)
        keyseg->language=param->language;   /* change language */
    }
    keyseg++;   /* Skip end pointer */
  }

  /* Copy the unique definitions and change them to point at the new key
     segments */
  memcpy((uchar*) uniquedef,(uchar*) share.uniqueinfo,
         (size_t) (sizeof(MI_UNIQUEDEF)*(share.state.header.uniques)));
  for (u_ptr=uniquedef,u_end=uniquedef+share.state.header.uniques;
       u_ptr != u_end ; u_ptr++)
  {
    keyseg+=u_ptr->keysegs+1;
  }

  if (share.options & HA_OPTION_COMPRESS_RECORD)
    share.base.records=max_records=info.state->records;
  else if (share.base.min_pack_length)
    max_records=(ha_rows) (my_seek(info.dfile,0L,MY_SEEK_END,MYF(0)) /
                           (ulong) share.base.min_pack_length);

  unpack= (share.options & HA_OPTION_COMPRESS_RECORD) &&
          (param->testflag & T_UNPACK);
  share.options&= ~HA_OPTION_TEMP_COMPRESS_RECORD;

  file_length=(uint64_t) my_seek(info.dfile,0L,MY_SEEK_END,MYF(0));
  tmp_length= file_length+file_length/10;
  set_if_bigger(file_length,param->max_data_file_length);
  set_if_bigger(file_length,tmp_length);
  set_if_bigger(file_length,(uint64_t) share.base.max_data_file_length);
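  /*
    Illustration (assumed sizes): for a 100 MB data file, tmp_length is
    110 MB and file_length ends up as the largest of 110 MB,
    param->max_data_file_length and share.base.max_data_file_length.
  */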
3893
VOID(mi_close(*org_info));
3894
memset((char*) &create_info, 0, sizeof(create_info));
3895
create_info.max_rows=max(max_records,share.base.records);
3896
create_info.reloc_rows=share.base.reloc;
3897
create_info.old_options=(share.options |
3898
(unpack ? HA_OPTION_TEMP_COMPRESS_RECORD : 0));
3900
create_info.data_file_length=file_length;
3901
create_info.auto_increment=share.state.auto_increment;
3902
create_info.language = (param->language ? param->language :
3903
share.state.header.language);
3904
create_info.key_file_length= status_info.key_file_length;
3906
Allow for creating an auto_increment key. This has an effect only if
3907
an auto_increment key exists in the original table.
3909
create_info.with_auto_increment= true;
3910
/* We don't have to handle symlinks here because we are using
3911
HA_DONT_TOUCH_DATA */
3912
if (mi_create(filename,
3913
share.base.keys - share.state.header.uniques,
3914
keyinfo, share.base.fields, recdef,
3915
share.state.header.uniques, uniquedef,
3917
HA_DONT_TOUCH_DATA))
3919
mi_check_print_error(param,"Got error %d when trying to recreate indexfile",my_errno);
3922
*org_info=mi_open(filename,O_RDWR,
3923
(param->testflag & T_WAIT_FOREVER) ? HA_OPEN_WAIT_IF_LOCKED :
3924
(param->testflag & T_DESCRIPT) ? HA_OPEN_IGNORE_IF_LOCKED :
3925
HA_OPEN_ABORT_IF_LOCKED);
3928
mi_check_print_error(param,"Got error %d when trying to open re-created indexfile",
3932
/* We are modifing */
3933
(*org_info)->s->options&= ~HA_OPTION_READ_ONLY_DATA;
3934
VOID(_mi_readinfo(*org_info,F_WRLCK,0));
3935
(*org_info)->state->records=info.state->records;
3936
if (share.state.create_time)
3937
(*org_info)->s->state.create_time=share.state.create_time;
3938
(*org_info)->s->state.unique=(*org_info)->this_unique=
3940
(*org_info)->state->checksum=info.state->checksum;
3941
(*org_info)->state->del=info.state->del;
3942
(*org_info)->s->state.dellink=share.state.dellink;
3943
(*org_info)->state->empty=info.state->empty;
3944
(*org_info)->state->data_file_length=info.state->data_file_length;
3945
if (update_state_info(param,*org_info,UPDATE_TIME | UPDATE_STAT |
3950
my_afree((uchar*) uniquedef);
3951
my_afree((uchar*) keyinfo);
3952
my_afree((uchar*) recdef);
3953
my_afree((uchar*) keysegs);


/* Write a suffix to the data file if needed. */

int write_data_suffix(SORT_INFO *sort_info, bool fix_datafile)
{
  MI_INFO *info=sort_info->info;

  if (info->s->options & HA_OPTION_COMPRESS_RECORD && fix_datafile)
  {
    unsigned char buff[MEMMAP_EXTRA_MARGIN];

    memset(buff, 0, sizeof(buff));
    if (my_b_write(&info->rec_cache,buff,sizeof(buff)))
    {
      mi_check_print_error(sort_info->param,
                           "%d when writing to datafile",errno);
      return 1;
    }
    sort_info->param->read_cache.end_of_file+=sizeof(buff);
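    /*
      The zero padding leaves MEMMAP_EXTRA_MARGIN spare bytes after the last
      record of the compressed data file, and end_of_file is advanced so the
      read cache's notion of the data file size includes the padding.
    */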