  memcpy( &share->state.state, info->state, sizeof(*info->state));

  got_error|= flush_blocks(param, share->getKeyCache(), share->kfile);
  info->rec_cache.end_io_cache();
  /* Replace the actual file with the temporary file */
  internal::my_close(new_file,MYF(0));
  info->dfile=new_file= -1;
  if (change_to_newfile(share->data_file_name,MI_NAME_DEXT,
                        DATA_TMP_EXT, share->base.raid_chunks,
                        (param->testflag & T_BACKUP_DATA ?
                         MYF(MY_REDEL_MAKE_BACKUP): MYF(0))) ||
      mi_open_datafile(info,share,-1))
  if (! param->error_printed)
    mi_check_print_error(param,"%d when fixing table",errno);
  internal::my_close(new_file,MYF(0));
  my_delete(param->temp_filename, MYF(MY_WME));
  if (info->dfile == new_file)
  mi_mark_crashed_on_repair(info);
  else if (key_map == share->state.key_map)
    share->state.changed&= ~STATE_NOT_OPTIMIZED_KEYS;
  share->state.changed|=STATE_NOT_SORTED_PAGES;

  void * rec_buff_ptr= NULL;
  rec_buff_ptr= mi_get_rec_buff_ptr(info, sort_param.rec_buff);
  if (rec_buff_ptr != NULL)
  rec_buff_ptr= mi_get_rec_buff_ptr(info, sort_param.record);
  if (rec_buff_ptr != NULL)
  free((unsigned char*) sort_info.key_block);
  free(sort_info.buff);
  param->read_cache.end_io_cache();
  info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
  if (!got_error && (param->testflag & T_UNPACK))
    share->state.header.options[0]&= (unsigned char) ~HA_OPTION_COMPRESS_RECORD;
  got_error|= flush_blocks(param, share->key_cache, share->kfile);
  VOID(end_io_cache(&info->rec_cache));
  /* Replace the actual file with the temporary file */
  my_close(new_file,MYF(0));
  info->dfile=new_file= -1;
  if (change_to_newfile(share->data_file_name,MI_NAME_DEXT,
                        DATA_TMP_EXT, share->base.raid_chunks,
                        (param->testflag & T_BACKUP_DATA ?
                         MYF(MY_REDEL_MAKE_BACKUP): MYF(0))) ||
      mi_open_datafile(info,share,-1))
  if (! param->error_printed)
    mi_check_print_error(param,"%d when fixing table",my_errno);
  VOID(my_close(new_file,MYF(0)));
  VOID(my_raid_delete(param->temp_filename,share->base.raid_chunks,
  if (info->dfile == new_file)
  mi_mark_crashed_on_repair(info);
  else if (key_map == share->state.key_map)
    share->state.changed&= ~STATE_NOT_OPTIMIZED_KEYS;
  share->state.changed|=STATE_NOT_SORTED_PAGES;

  my_free(mi_get_rec_buff_ptr(info, sort_param.rec_buff),
          MYF(MY_ALLOW_ZERO_PTR));
  my_free(mi_get_rec_buff_ptr(info, sort_param.record),
          MYF(MY_ALLOW_ZERO_PTR));
  my_free((uchar*) sort_info.key_block,MYF(MY_ALLOW_ZERO_PTR));
  my_free(sort_info.buff,MYF(MY_ALLOW_ZERO_PTR));
  VOID(end_io_cache(&param->read_cache));
  info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
  if (!got_error && (param->testflag & T_UNPACK))
    share->state.header.options[0]&= (uchar) ~HA_OPTION_COMPRESS_RECORD;
  share->pack.header_length=0;
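/*
  Illustrative sketch (not part of mi_check.c): the cleanup above relies on
  my_free(ptr, MYF(MY_ALLOW_ZERO_PTR)) tolerating a NULL pointer, which is
  why the variant that uses plain free() guards each call with an explicit
  rec_buff_ptr != NULL check. A minimal stand-in for that behaviour, with a
  hypothetical name, could look like this:
*/
#include <stdlib.h>

static void free_allow_zero_ptr(void *ptr)
{
  /* Freeing NULL is a no-op, matching the intent of MYF(MY_ALLOW_ZERO_PTR). */
  if (ptr != NULL)
    free(ptr);
}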
  Threaded repair of table using sorting

    mi_repair_parallel()
    param        Repair parameters
    info         MyISAM handler to repair
    name         Name of table (for warnings)
    rep_quick    set to <> 0 if we should not change data file

    Same as mi_repair_by_sort but do it multithreaded.
    Each key is handled by a separate thread.
    TODO: make the number of threads a parameter

    In parallel repair we use one thread per index. There are two modes:

    Only the indexes are rebuilt. All threads share a read buffer.
    Every thread that needs fresh data in the buffer enters the shared
    cache lock. The last thread joining the lock reads the buffer from
    the data file and wakes all other threads.

    The data file is rebuilt and all indexes are rebuilt to point to
    the new record positions. One thread is the master thread. It
    reads from the old data file and writes to the new data file. It
    also creates one of the indexes. The other threads read from a
    buffer which is filled by the master. If they need fresh data,
    they enter the shared cache lock. If the master's write buffer is
    full, it flushes it to the new data file and enters the shared
    cache lock too. When all threads have joined in the lock, the master
    copies its write buffer to the read buffer for the other threads
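/*
  Illustrative sketch (not part of mi_check.c): the shared read-buffer
  protocol described above, reduced to a standalone pthread program. The
  names here (shared_buf, refill_buffer, N_THREADS, ...) are invented for
  the example; the real code wires this up through IO_CACHE_SHARE and
  init_io_cache_share() as shown further below.
*/
#include <pthread.h>
#include <stdio.h>

#define N_THREADS 4
#define N_REFILLS 3

static pthread_mutex_t buf_mutex= PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  buf_cond=  PTHREAD_COND_INITIALIZER;
static int waiting= 0;      /* threads currently waiting for fresh data */
static int generation= 0;   /* bumped every time the buffer is refilled */
static int shared_buf= 0;   /* stands in for the shared read buffer */

static void refill_buffer(void)
{
  shared_buf++;             /* pretend we read the next chunk of the data file */
}

static void *worker(void *arg)
{
  long id= (long) arg;
  int i;
  for (i= 0; i < N_REFILLS; i++)
  {
    pthread_mutex_lock(&buf_mutex);
    int my_generation= generation;
    if (++waiting == N_THREADS)
    {
      /* Last thread to join the lock: refill the buffer and wake the rest. */
      refill_buffer();
      waiting= 0;
      generation++;
      pthread_cond_broadcast(&buf_cond);
    }
    else
    {
      /* Wait until the last thread has refilled the buffer. */
      while (my_generation == generation)
        pthread_cond_wait(&buf_cond, &buf_mutex);
    }
    printf("thread %ld sees buffer state %d\n", id, shared_buf);
    pthread_mutex_unlock(&buf_mutex);
  }
  return NULL;
}

int main(void)
{
  pthread_t thr[N_THREADS];
  long i;
  for (i= 0; i < N_THREADS; i++)
    pthread_create(&thr[i], NULL, worker, (void*) i);
  for (i= 0; i < N_THREADS; i++)
    pthread_join(thr[i], NULL);
  return 0;
}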
int mi_repair_parallel(MI_CHECK *param, register MI_INFO *info,
                       const char * name, int rep_quick)

  uint i,key, total_key_length, istep;
  ha_rows start_records;
  my_off_t new_header_length,del;
  MI_SORT_PARAM *sort_param=0;
  MYISAM_SHARE *share=info->s;
  ulong *rec_per_key_part;
  IO_CACHE new_data_cache; /* For non-quick repair. */
  IO_CACHE_SHARE io_share;
  SORT_INFO sort_info;
  uint64_t key_map= 0;
  pthread_attr_t thr_attr;
  ulong max_pack_reclength;

  start_records=info->state->records;
  new_header_length=(param->testflag & T_UNPACK) ? 0 :
    share->pack.header_length;
  if (!(param->testflag & T_SILENT))
    printf("- parallel recovering (with sort) MyISAM-table '%s'\n",name);
    printf("Data records: %s\n", llstr(start_records,llbuff));
  param->testflag|=T_REP; /* for easy checking */

  if (info->s->options & (HA_OPTION_CHECKSUM | HA_OPTION_COMPRESS_RECORD))
    param->testflag|=T_CALC_CHECKSUM;

    Quick repair (not touching data file, rebuilding indexes):
    Read cache is (MI_CHECK *param)->read_cache using info->dfile.

    Non-quick repair (rebuilding data file and indexes):
    Read cache is (MI_CHECK *param)->read_cache using info->dfile.
    Write cache is (MI_INFO *info)->rec_cache using new_file.
    Read cache is new_data_cache synced to master rec_cache.

    The final assignment of the filedescriptor for rec_cache is done
    after the cache creation.

    Don't check file size on new_data_cache, as the resulting file size

    As rec_cache and new_data_cache are synced, write_buffer_length is
    used for the read cache 'new_data_cache'. Both start at the same
    position 'new_header_length'.
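  /*
    Recap of the cache layout described above:

      Quick repair:      param->read_cache  reads info->dfile and is shared
                         by all repair threads.
      Non-quick repair:  param->read_cache  reads info->dfile (master thread);
                         info->rec_cache    writes new_file (master thread);
                         new_data_cache     gets its file descriptor later,
                                            is synced to the master's
                                            rec_cache and is read by the
                                            other threads.
  */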
  memset(&sort_info, 0, sizeof(sort_info));
  /* Initialize pthread structures before goto err. */
  pthread_mutex_init(&sort_info.mutex, MY_MUTEX_INIT_FAST);
  pthread_cond_init(&sort_info.cond, 0);
  if (!(sort_info.key_block=
        alloc_key_blocks(param, (uint) param->sort_key_blocks,
                         share->base.max_key_block_length)) ||
      init_io_cache(&param->read_cache, info->dfile,
                    (uint) param->read_buffer_length,
                    READ_CACHE, share->pack.header_length, 1, MYF(MY_WME)) ||
      (init_io_cache(&info->rec_cache, info->dfile,
                     (uint) param->write_buffer_length,
                     WRITE_CACHE, new_header_length, 1,
                     MYF(MY_WME | MY_WAIT_IF_FULL) & param->myf_rw) ||
       init_io_cache(&new_data_cache, -1,
                     (uint) param->write_buffer_length,
                     READ_CACHE, new_header_length, 1,
                     MYF(MY_WME | MY_DONT_CHECK_FILESIZE)))))
  sort_info.key_block_end=sort_info.key_block+param->sort_key_blocks;
  info->opt_flag|=WRITE_CACHE_USED;
  info->rec_cache.file=info->dfile; /* for sort_delete_record */

  /* Get real path for data file */
  if ((new_file=my_raid_create(fn_format(param->temp_filename,
                                         share->data_file_name, "",
                               0,param->tmpfile_createflag,
                               share->base.raid_type,
                               share->base.raid_chunks,
                               share->base.raid_chunksize,
    mi_check_print_error(param,"Can't create new tempfile: '%s'",
                         param->temp_filename);

  if (new_header_length &&
      filecopy(param, new_file,info->dfile,0L,new_header_length,
  if (param->testflag & T_UNPACK)
    share->options&= ~HA_OPTION_COMPRESS_RECORD;
    mi_int2store(share->state.header.options,share->options);
  share->state.dellink= HA_OFFSET_ERROR;
  info->rec_cache.file=new_file;

  info->update= (short) (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED);

  /* Optionally drop indexes and optionally modify the key_map. */
  mi_drop_all_indexes(param, info, false);
  key_map= share->state.key_map;
  if (param->testflag & T_CREATE_MISSING_KEYS)
    /* Invert the copied key_map to recreate all disabled indexes. */

  sort_info.info=info;
  sort_info.param = param;
  set_data_file_type(&sort_info, share);
  param->read_cache.end_of_file=sort_info.filelength=
    my_seek(param->read_cache.file,0L,MY_SEEK_END,MYF(0));

  if (share->data_file_type == DYNAMIC_RECORD)
    rec_length=max(share->base.min_pack_length+1,share->base.min_block_length);
  else if (share->data_file_type == COMPRESSED_RECORD)
    rec_length=share->base.min_block_length;
    rec_length=share->base.pack_reclength;

    The +1 below is a required hack for parallel repair mode.
    The info->state->records value, which is compared later
    to sort_info.max_records and cannot exceed it, is
    increased in sort_key_write. In mi_repair_by_sort, sort_key_write
    is called after sort_key_read, where the comparison is performed,
    but in parallel mode the master thread can call sort_key_write
    before some other repair thread calls sort_key_read.
    Furthermore I'm not even sure +1 would be enough.
    Maybe sort_info.max_records should always be set to the max value in

  sort_info.max_records=
    ((param->testflag & T_CREATE_MISSING_KEYS) ? info->state->records + 1:
     (ha_rows) (sort_info.filelength/rec_length+1));
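  /*
    Illustration with hypothetical numbers (not from the source): a 1 MiB
    data file with a 20-byte minimum record length gives an upper bound of
    1048576 / 20 + 1 = 52429 records for sort_info.max_records.
  */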
  del=info->state->del;

  /* for compressed tables */
  max_pack_reclength= share->base.pack_reclength;
  if (share->options & HA_OPTION_COMPRESS_RECORD)
    set_if_bigger(max_pack_reclength, share->max_pack_length);
  if (!(sort_param=(MI_SORT_PARAM *)
        my_malloc((uint) share->base.keys *
                  (sizeof(MI_SORT_PARAM) + max_pack_reclength),
    mi_check_print_error(param,"Not enough memory for key!");

  rec_per_key_part= param->rec_per_key_part;
  info->state->records=info->state->del=share->state.split=0;
  info->state->empty=0;

  for (i=key=0, istep=1 ; key < share->base.keys ;
       rec_per_key_part+=sort_param[i].keyinfo->keysegs, i+=istep, key++)
    sort_param[i].key=key;
    sort_param[i].keyinfo=share->keyinfo+key;
    sort_param[i].seg=sort_param[i].keyinfo->seg;
      Skip this index if it is marked disabled in the copied
      (and possibly inverted) key_map.
    if (! mi_is_key_active(key_map, key))
    /* Remember old statistics for key */
    assert(rec_per_key_part >= param->rec_per_key_part);
    memcpy(rec_per_key_part,
           (share->state.rec_per_key_part +
            (rec_per_key_part - param->rec_per_key_part)),
           sort_param[i].keyinfo->keysegs*sizeof(*rec_per_key_part));

    if ((!(param->testflag & T_SILENT)))
      printf ("- Fixing index %d\n",key+1);

    sort_param[i].key_read=sort_key_read;
    sort_param[i].key_write=sort_key_write;
    sort_param[i].key_cmp=sort_key_cmp;
    sort_param[i].lock_in_memory=lock_memory;
    sort_param[i].tmpdir=param->tmpdir;
    sort_param[i].sort_info=&sort_info;
    sort_param[i].master=0;
    sort_param[i].fix_datafile=0;
    sort_param[i].calc_checksum= 0;

    sort_param[i].filepos=new_header_length;
    sort_param[i].max_pos=sort_param[i].pos=share->pack.header_length;

    sort_param[i].record= (((uchar *)(sort_param+share->base.keys))+
                           (max_pack_reclength * i));
    if (!mi_alloc_rec_buff(info, -1, &sort_param[i].rec_buff))
      mi_check_print_error(param,"Not enough memory!");

    sort_param[i].key_length=share->rec_reflength;
    for (keyseg=sort_param[i].seg; keyseg->type != HA_KEYTYPE_END;
      sort_param[i].key_length+=keyseg->length;
      if (keyseg->flag & HA_SPACE_PACK)
        sort_param[i].key_length+=get_pack_length(keyseg->length);
      if (keyseg->flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART))
        sort_param[i].key_length+=2 + test(keyseg->length >= 127);
      if (keyseg->flag & HA_NULL_PART)
        sort_param[i].key_length++;

    total_key_length+=sort_param[i].key_length;
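    /*
      Illustration with hypothetical numbers (not from the source): for a
      nullable VAR_LENGTH_PART segment with keyseg->length == 200, the loop
      above adds 200 + 2 + 1 (since 200 >= 127) + 1 (NULL part) = 204 bytes
      to sort_param[i].key_length, on top of the initial rec_reflength.
    */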
  sort_info.total_keys=i;
  sort_param[0].master= 1;
  sort_param[0].fix_datafile= (bool)(! rep_quick);
  sort_param[0].calc_checksum= test(param->testflag & T_CALC_CHECKSUM);
  sort_info.got_error=0;
  pthread_mutex_lock(&sort_info.mutex);

    Initialize the I/O cache share for use with the read caches and, in
    case of non-quick repair, the write cache. When all threads join on
    the cache lock, the writer copies the write cache contents to the

  init_io_cache_share(&param->read_cache, &io_share, NULL, i);
  init_io_cache_share(&new_data_cache, &io_share, &info->rec_cache, i);
  io_share.total_threads= 0; /* share not used */

  (void) pthread_attr_init(&thr_attr);
  (void) pthread_attr_setdetachstate(&thr_attr,PTHREAD_CREATE_DETACHED);

  for (i=0 ; i < sort_info.total_keys ; i++)
      Copy the properly initialized IO_CACHE structure so that every
      thread has its own copy. In quick mode param->read_cache is shared
      for use by all threads. In non-quick mode all threads but the
      first copy the shared new_data_cache, which is synchronized to the
      write cache of the first thread. The first thread copies
      param->read_cache, which is not shared.
    sort_param[i].read_cache= ((rep_quick || !i) ? param->read_cache :

      two approaches: the same amount of memory for each thread
      or the memory for the same number of keys for each thread...
      In the second one all the threads will fill their sort_buffers
      (and call write_keys) at the same time, putting more stress on i/o.
    sort_param[i].sortbuff_size=
#ifndef USING_SECOND_APPROACH
      param->sort_buffer_length/sort_info.total_keys;
      param->sort_buffer_length*sort_param[i].key_length/total_key_length;
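/*
  Illustrative sketch (not part of mi_check.c): the two buffer-split
  strategies discussed above, computed for hypothetical numbers. A 16 MB
  sort buffer shared by 4 keys with key lengths 8, 8, 16 and 32 bytes gives
  every thread 4 MB under the first approach, and 2, 2, 4 and 8 MB under
  the second (proportional) approach.
*/
#include <stdio.h>

int main(void)
{
  unsigned long sort_buffer_length= 16UL * 1024 * 1024;  /* hypothetical */
  unsigned int  key_length[4]= {8, 8, 16, 32};           /* hypothetical */
  unsigned int  total_keys= 4, total_key_length= 64, i;

  for (i= 0; i < total_keys; i++)
    printf("key %u: equal split %lu bytes, proportional split %lu bytes\n",
           i,
           sort_buffer_length / total_keys,
           sort_buffer_length * key_length[i] / total_key_length);
  return 0;
}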
    if (pthread_create(&sort_param[i].thr, &thr_attr,
                       (void *) (sort_param+i)))
      mi_check_print_error(param,"Cannot start a repair thread");
      /* Cleanup: Detach from the share. Avoid blocking the others. */
      if (io_share.total_threads)
        remove_io_thread(&sort_param[i].read_cache);
      sort_info.got_error=1;
    sort_info.threads_running++;

  (void) pthread_attr_destroy(&thr_attr);

  /* waiting for all threads to finish */
  while (sort_info.threads_running)
    pthread_cond_wait(&sort_info.cond, &sort_info.mutex);
  pthread_mutex_unlock(&sort_info.mutex);

  if ((got_error= thr_write_keys(sort_param)))
    param->retry_repair=1;

  got_error=1; /* Assume the following may go wrong */
  if (sort_param[0].fix_datafile)
      Append some nuls to the end of a memory mapped file. Destroy the
      write cache. The master thread has already detached from the share
      by remove_io_thread() in sort.c:thr_find_all_keys().
    if (write_data_suffix(&sort_info,1) || end_io_cache(&info->rec_cache))
    if (param->testflag & T_SAFE_REPAIR)
      /* Don't repair if we lost more than one row */
      if (info->state->records+1 < start_records)
        info->state->records=start_records;
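      /*
        Example with hypothetical numbers: with start_records == 100 the
        guard above triggers once fewer than 99 rows survive the repair
        (98 + 1 < 100), i.e. as soon as more than one row would be lost.
      */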
    share->state.state.data_file_length= info->state->data_file_length=
      sort_param->filepos;
    /* Only whole records */
    share->state.version=(ulong) time((time_t*) 0);
      Exchange the data file descriptor of the table, so that we use the
      new file from now on.
    my_close(info->dfile,MYF(0));
    info->dfile=new_file;

    share->data_file_type=sort_info.new_data_file_type;
    share->pack.header_length=(ulong) new_header_length;

    info->state->data_file_length=sort_param->max_pos;

  if (rep_quick && del+sort_info.dupp != info->state->del)
    mi_check_print_error(param,"Couldn't fix table with quick recovery: Found wrong number of deleted records");
    mi_check_print_error(param,"Run recovery again without -q");
    param->retry_repair=1;
    param->testflag|=T_RETRY_WITHOUT_QUICK;

  if (rep_quick & T_FORCE_UNIQUENESS)
    my_off_t skr=info->state->data_file_length+
      (share->options & HA_OPTION_COMPRESS_RECORD ?
       MEMMAP_EXTRA_MARGIN : 0);
    if (share->data_file_type == STATIC_RECORD &&
        skr < share->base.reloc*share->base.min_pack_length)
      skr=share->base.reloc*share->base.min_pack_length;
    if (skr != sort_info.filelength && !info->s->base.raid_type)
      if (ftruncate(info->dfile, skr))
        mi_check_print_warning(param,
                               "Can't change size of datafile, error: %d",

  if (param->testflag & T_CALC_CHECKSUM)
    info->state->checksum=param->glob_crc;

  if (ftruncate(share->kfile, info->state->key_file_length))
    mi_check_print_warning(param,
                           "Can't change size of indexfile, error: %d", my_errno);

  if (!(param->testflag & T_SILENT))
    if (start_records != info->state->records)
      printf("Data records: %s\n", llstr(info->state->records,llbuff));
    mi_check_print_warning(param,
                           "%s records have been removed",
                           llstr(sort_info.dupp,llbuff));

  if (&share->state.state != info->state)
    memcpy(&share->state.state, info->state, sizeof(*info->state));

  got_error|= flush_blocks(param, share->key_cache, share->kfile);
    Destroy the write cache. The master thread has already detached from
    the share by remove_io_thread() or it was not yet started (if the
    error happened before creating the thread).
  VOID(end_io_cache(&info->rec_cache));
    Destroy the new data cache in case of non-quick repair. All slave
    threads have either already detached from the share by
    remove_io_thread() or were not yet started (if the error happened
    before creating the threads).
  VOID(end_io_cache(&new_data_cache));
  /* Replace the actual file with the temporary file */
  my_close(new_file,MYF(0));
  info->dfile=new_file= -1;
  if (change_to_newfile(share->data_file_name,MI_NAME_DEXT,
                        DATA_TMP_EXT, share->base.raid_chunks,
                        (param->testflag & T_BACKUP_DATA ?
                         MYF(MY_REDEL_MAKE_BACKUP): MYF(0))) ||
      mi_open_datafile(info,share,-1))
  if (! param->error_printed)
    mi_check_print_error(param,"%d when fixing table",my_errno);
  VOID(my_close(new_file,MYF(0)));
  VOID(my_raid_delete(param->temp_filename,share->base.raid_chunks,
  if (info->dfile == new_file)
  mi_mark_crashed_on_repair(info);
  else if (key_map == share->state.key_map)
    share->state.changed&= ~STATE_NOT_OPTIMIZED_KEYS;
  share->state.changed|=STATE_NOT_SORTED_PAGES;

  pthread_cond_destroy (&sort_info.cond);
  pthread_mutex_destroy(&sort_info.mutex);

  my_free((uchar*) sort_info.key_block,MYF(MY_ALLOW_ZERO_PTR));
  my_free((uchar*) sort_param,MYF(MY_ALLOW_ZERO_PTR));
  my_free(sort_info.buff,MYF(MY_ALLOW_ZERO_PTR));
  VOID(end_io_cache(&param->read_cache));
  info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
  if (!got_error && (param->testflag & T_UNPACK))
    share->state.header.options[0]&= (uchar) ~HA_OPTION_COMPRESS_RECORD;
  share->pack.header_length=0;

  return(got_error);
  if (info->s->options & HA_OPTION_COMPRESS_RECORD)
  return (my_off_t)(lseek(info->s->kfile, 0L, SEEK_END) / 10 * 9) >
  return my_seek(info->s->kfile, 0L, MY_SEEK_END, MYF(MY_THREADSAFE)) / 10 * 9 >
         (my_off_t) info->s->base.max_key_file_length ||
         (my_off_t)(lseek(info->dfile, 0L, SEEK_END) / 10 * 9) >
         my_seek(info->dfile, 0L, MY_SEEK_END, MYF(0)) / 10 * 9 >
         (my_off_t) info->s->base.max_data_file_length;
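  /*
    Note on the expression above: file_size / 10 * 9 computes 90% of the
    current file size using integer (truncating) arithmetic, and that value
    is compared against the configured maximum key/data file length.
  */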
/* Recreate table with a bigger, more generously allocated record-data area */

int recreate_table(MI_CHECK *param, MI_INFO **org_info, char *filename)

  MI_KEYDEF *keyinfo,*key,*key_end;
  HA_KEYSEG *keysegs,*keyseg;
  MI_COLUMNDEF *recdef,*rec,*end;
  MI_UNIQUEDEF *uniquedef,*u_ptr,*u_end;
  MI_STATUS_INFO status_info;
  uint unpack,key_parts;
  ha_rows max_records;
  uint64_t file_length,tmp_length;
  MI_CREATE_INFO create_info;

  error=1; /* Default error */
  status_info= (*org_info)->state[0];
  info.state= &status_info;
  share= *(*org_info)->s;
  unpack= (share.options & HA_OPTION_COMPRESS_RECORD) &&
    (param->testflag & T_UNPACK);
  if (!(keyinfo=(MI_KEYDEF*) my_alloca(sizeof(MI_KEYDEF)*share.base.keys)))
  memcpy(keyinfo,share.keyinfo,sizeof(MI_KEYDEF)*share.base.keys);

  key_parts= share.base.all_key_parts;
  if (!(keysegs=(HA_KEYSEG*) my_alloca(sizeof(HA_KEYSEG)*
                                       (key_parts+share.base.keys))))
    my_afree((uchar*) keyinfo);

  if (!(recdef=(MI_COLUMNDEF*)
        my_alloca(sizeof(MI_COLUMNDEF)*(share.base.fields+1))))
    my_afree((uchar*) keyinfo);
    my_afree((uchar*) keysegs);

  if (!(uniquedef=(MI_UNIQUEDEF*)
        my_alloca(sizeof(MI_UNIQUEDEF)*(share.state.header.uniques+1))))
    my_afree((uchar*) recdef);
    my_afree((uchar*) keyinfo);
    my_afree((uchar*) keysegs);

  /* Copy the column definitions */
  memcpy(recdef, share.rec, sizeof(MI_COLUMNDEF)*(share.base.fields+1));
  for (rec=recdef,end=recdef+share.base.fields; rec != end ; rec++)
    if (unpack && !(share.options & HA_OPTION_PACK_RECORD) &&
        rec->type != FIELD_BLOB &&
        rec->type != FIELD_VARCHAR &&
        rec->type != FIELD_CHECK)
      rec->type=(int) FIELD_NORMAL;

  /* Change the new key to point at the saved key segments */
  memcpy(keysegs,share.keyparts,
         sizeof(HA_KEYSEG)*(key_parts+share.base.keys+
                            share.state.header.uniques));

  for (key=keyinfo,key_end=keyinfo+share.base.keys; key != key_end ; key++)
    for (; keyseg->type ; keyseg++)
      if (param->language)
        keyseg->language=param->language; /* change language */
    keyseg++; /* Skip end pointer */
/* Copy the unique definitions and change them to point at the new key
3754
memcpy(uniquedef,share.uniqueinfo,
3755
sizeof(MI_UNIQUEDEF)*(share.state.header.uniques));
3756
for (u_ptr=uniquedef,u_end=uniquedef+share.state.header.uniques;
3757
u_ptr != u_end ; u_ptr++)
3760
keyseg+=u_ptr->keysegs+1;
3762
if (share.options & HA_OPTION_COMPRESS_RECORD)
3763
share.base.records=max_records=info.state->records;
3764
else if (share.base.min_pack_length)
3765
max_records=(ha_rows) (my_seek(info.dfile,0L,MY_SEEK_END,MYF(0)) /
3766
(ulong) share.base.min_pack_length);
3769
unpack= (share.options & HA_OPTION_COMPRESS_RECORD) &&
3770
(param->testflag & T_UNPACK);
3771
share.options&= ~HA_OPTION_TEMP_COMPRESS_RECORD;
3773
file_length=(uint64_t) my_seek(info.dfile,0L,MY_SEEK_END,MYF(0));
3774
tmp_length= file_length+file_length/10;
3775
set_if_bigger(file_length,param->max_data_file_length);
3776
set_if_bigger(file_length,tmp_length);
3777
set_if_bigger(file_length,(uint64_t) share.base.max_data_file_length);
3779
VOID(mi_close(*org_info));
3780
memset(&create_info, 0, sizeof(create_info));
3781
create_info.max_rows=max(max_records,share.base.records);
3782
create_info.reloc_rows=share.base.reloc;
3783
create_info.old_options=(share.options |
3784
(unpack ? HA_OPTION_TEMP_COMPRESS_RECORD : 0));
3786
create_info.data_file_length=file_length;
3787
create_info.auto_increment=share.state.auto_increment;
3788
create_info.language = (param->language ? param->language :
3789
share.state.header.language);
3790
create_info.key_file_length= status_info.key_file_length;
3792
Allow for creating an auto_increment key. This has an effect only if
3793
an auto_increment key exists in the original table.
3795
create_info.with_auto_increment= true;
3796
/* We don't have to handle symlinks here because we are using
3797
HA_DONT_TOUCH_DATA */
3798
if (mi_create(filename,
3799
share.base.keys - share.state.header.uniques,
3800
keyinfo, share.base.fields, recdef,
3801
share.state.header.uniques, uniquedef,
3803
HA_DONT_TOUCH_DATA))
3805
mi_check_print_error(param,"Got error %d when trying to recreate indexfile",my_errno);
3808
*org_info=mi_open(filename,O_RDWR,
3809
(param->testflag & T_WAIT_FOREVER) ? HA_OPEN_WAIT_IF_LOCKED :
3810
(param->testflag & T_DESCRIPT) ? HA_OPEN_IGNORE_IF_LOCKED :
3811
HA_OPEN_ABORT_IF_LOCKED);
3814
mi_check_print_error(param,"Got error %d when trying to open re-created indexfile",
3818
/* We are modifing */
3819
(*org_info)->s->options&= ~HA_OPTION_READ_ONLY_DATA;
3820
VOID(_mi_readinfo(*org_info,F_WRLCK,0));
3821
(*org_info)->state->records=info.state->records;
3822
if (share.state.create_time)
3823
(*org_info)->s->state.create_time=share.state.create_time;
3824
(*org_info)->s->state.unique=(*org_info)->this_unique=
3826
(*org_info)->state->checksum=info.state->checksum;
3827
(*org_info)->state->del=info.state->del;
3828
(*org_info)->s->state.dellink=share.state.dellink;
3829
(*org_info)->state->empty=info.state->empty;
3830
(*org_info)->state->data_file_length=info.state->data_file_length;
3831
if (update_state_info(param,*org_info,UPDATE_TIME | UPDATE_STAT |
3836
my_afree((uchar*) uniquedef);
3837
my_afree((uchar*) keyinfo);
3838
my_afree((uchar*) recdef);
3839
my_afree((uchar*) keysegs);
3156
3844
/* write suffix to data file if neaded */