~drizzle-trunk/drizzle/development

Viewing changes to plugin/innobase/row/row0merge.c

  • Committer: Brian Aker
  • Date: 2010-10-12 05:21:02 UTC
  • Revision ID: brian@tangent.org-20101012052102-9yrbu1ye7n8n4b6n
  • Commit message: Remove dead code.

=== modified file 'plugin/innobase/row/row0merge.c'
--- plugin/innobase/row/row0merge.c
+++ plugin/innobase/row/row0merge.c
@@ -1,6 +1,6 @@
 /*****************************************************************************
 
-Copyright (c) 2005, 2010, Innobase Oy. All Rights Reserved.
+Copyright (c) 2005, 2009, Innobase Oy. All Rights Reserved.
 
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -58,27 +58,12 @@
 #include "handler0alter.h"
 #include <unistd.h>
 
-/* Ignore posix_fadvise() on those platforms where it does not exist */
-#if defined __WIN__
-# define posix_fadvise(fd, offset, len, advice) /* nothing */
-#endif /* __WIN__ */
-
 #ifdef UNIV_DEBUG
 /** Set these in order ot enable debug printout. */
 /* @{ */
-/** Log the outcome of each row_merge_cmp() call, comparing records. */
 static ibool    row_merge_print_cmp;
-/** Log each record read from temporary file. */
 static ibool    row_merge_print_read;
-/** Log each record write to temporary file. */
 static ibool    row_merge_print_write;
-/** Log each row_merge_blocks() call, merging two blocks of records to
-a bigger one. */
-static ibool    row_merge_print_block;
-/** Log each block read from temporary file. */
-static ibool    row_merge_print_block_read;
-/** Log each block read from temporary file. */
-static ibool    row_merge_print_block_write;
 /* @} */
 #endif /* UNIV_DEBUG */
 
@@ -125,9 +110,8 @@
 
 /** Information about temporary files used in merge sort */
 struct merge_file_struct {
-        int             fd;             /*!< file descriptor */
-        ulint           offset;         /*!< file offset (end of file) */
-        ib_uint64_t     n_rec;          /*!< number of records in the file */
+        int     fd;             /*!< file descriptor */
+        ulint   offset;         /*!< file offset */
 };
 
 /** Information about temporary files used in merge sort */
@@ -275,7 +259,6 @@
         const dict_index_t*     index;
         dfield_t*               entry;
         dfield_t*               field;
-        const dict_field_t*     ifield;
 
         if (buf->n_tuples >= buf->max_tuples) {
                 return(FALSE);
@@ -294,14 +277,14 @@
         data_size = 0;
         extra_size = UT_BITS_IN_BYTES(index->n_nullable);
 
-        ifield = dict_index_get_nth_field(index, 0);
-
-        for (i = 0; i < n_fields; i++, field++, ifield++) {
+        for (i = 0; i < n_fields; i++, field++) {
+                const dict_field_t*     ifield;
                 const dict_col_t*       col;
                 ulint                   col_no;
                 const dfield_t*         row_field;
                 ulint                   len;
 
+                ifield = dict_index_get_nth_field(index, i);
                 col = ifield->col;
                 col_no = dict_col_get_no(col);
                 row_field = dtuple_get_nth_field(row, col_no);
@@ -339,7 +322,7 @@
                 if (ifield->prefix_len) {
                         len = dtype_get_at_most_n_mbchars(
                                 col->prtype,
-                                col->mbminmaxlen,
+                                col->mbminlen, col->mbmaxlen,
                                 ifield->prefix_len,
                                 len, dfield_get_data(field));
                         dfield_set_len(field, len);
@@ -415,7 +398,7 @@
 /** Structure for reporting duplicate records. */
 struct row_merge_dup_struct {
         const dict_index_t*     index;          /*!< index being sorted */
-        struct TABLE*           table;          /*!< MySQL table object */
+        TABLE*                  table;          /*!< MySQL table object */
         ulint                   n_dup;          /*!< number of duplicates */
 };
 
@@ -431,13 +414,14 @@
         row_merge_dup_t*        dup,    /*!< in/out: for reporting duplicates */
         const dfield_t*         entry)  /*!< in: duplicate index entry */
 {
-        mrec_buf_t*             buf;
+        mrec_buf_t              buf;
         const dtuple_t*         tuple;
         dtuple_t                tuple_store;
         const rec_t*            rec;
         const dict_index_t*     index   = dup->index;
         ulint                   n_fields= dict_index_get_n_fields(index);
-        mem_heap_t*             heap;
+        mem_heap_t*             heap    = NULL;
+        ulint                   offsets_[REC_OFFS_NORMAL_SIZE];
         ulint*                  offsets;
         ulint                   n_ext;
 
@@ -447,22 +431,22 @@
                 return;
         }
 
+        rec_offs_init(offsets_);
+
         /* Convert the tuple to a record and then to MySQL format. */
-        heap = mem_heap_create((1 + REC_OFFS_HEADER_SIZE + n_fields)
-                               * sizeof *offsets
-                               + sizeof *buf);
-
-        buf = mem_heap_alloc(heap, sizeof *buf);
 
         tuple = dtuple_from_fields(&tuple_store, entry, n_fields);
         n_ext = dict_index_is_clust(index) ? dtuple_get_n_ext(tuple) : 0;
 
-        rec = rec_convert_dtuple_to_rec(*buf, index, tuple, n_ext);
-        offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap);
+        rec = rec_convert_dtuple_to_rec(buf, index, tuple, n_ext);
+        offsets = rec_get_offsets(rec, index, offsets_, ULINT_UNDEFINED,
+                                  &heap);
 
         innobase_rec_to_mysql(dup->table, rec, index, offsets);
 
-        mem_heap_free(heap);
+        if (UNIV_LIKELY_NULL(heap)) {
+                mem_heap_free(heap);
+        }
 }
 
 /*************************************************************//**
@@ -633,26 +617,22 @@
 }
 
 /******************************************************//**
-Create a memory heap and allocate space for row_merge_rec_offsets()
-and mrec_buf_t[3].
+Create a memory heap and allocate space for row_merge_rec_offsets().
 @return memory heap */
 static
 mem_heap_t*
 row_merge_heap_create(
 /*==================*/
         const dict_index_t*     index,          /*!< in: record descriptor */
-        mrec_buf_t**            buf,            /*!< out: 3 buffers */
         ulint**                 offsets1,       /*!< out: offsets */
         ulint**                 offsets2)       /*!< out: offsets */
 {
         ulint           i       = 1 + REC_OFFS_HEADER_SIZE
                 + dict_index_get_n_fields(index);
-        mem_heap_t*     heap    = mem_heap_create(2 * i * sizeof **offsets1
-                                                  + 3 * sizeof **buf);
+        mem_heap_t*     heap    = mem_heap_create(2 * i * sizeof *offsets1);
 
-        *buf = mem_heap_alloc(heap, 3 * sizeof **buf);
-        *offsets1 = mem_heap_alloc(heap, i * sizeof **offsets1);
-        *offsets2 = mem_heap_alloc(heap, i * sizeof **offsets2);
+        *offsets1 = mem_heap_alloc(heap, i * sizeof *offsets1);
+        *offsets2 = mem_heap_alloc(heap, i * sizeof *offsets2);
 
         (*offsets1)[0] = (*offsets2)[0] = i;
         (*offsets1)[1] = (*offsets2)[1] = dict_index_get_n_fields(index);
@@ -697,30 +677,16 @@
 row_merge_read(
 /*===========*/
         int                     fd,     /*!< in: file descriptor */
-        ulint                   offset, /*!< in: offset where to read
-                                        in number of row_merge_block_t
-                                        elements */
+        ulint                   offset, /*!< in: offset where to read */
         row_merge_block_t*      buf)    /*!< out: data */
 {
         ib_uint64_t     ofs = ((ib_uint64_t) offset) * sizeof *buf;
         ibool           success;
 
-#ifdef UNIV_DEBUG
-        if (row_merge_print_block_read) {
-                fprintf(stderr, "row_merge_read fd=%d ofs=%lu\n",
-                        fd, (ulong) offset);
-        }
-#endif /* UNIV_DEBUG */
-
         success = os_file_read_no_error_handling(OS_FILE_FROM_FD(fd), buf,
                                                  (ulint) (ofs & 0xFFFFFFFF),
                                                  (ulint) (ofs >> 32),
                                                  sizeof *buf);
-#ifdef POSIX_FADV_DONTNEED
-        /* Each block is read exactly once.  Free up the file cache. */
-        posix_fadvise(fd, ofs, sizeof *buf, POSIX_FADV_DONTNEED);
-#endif /* POSIX_FADV_DONTNEED */
-
         if (UNIV_UNLIKELY(!success)) {
                 ut_print_timestamp(stderr);
                 fprintf(stderr,
@@ -731,46 +697,29 @@
 }
 
 /********************************************************************//**
-Write a merge block to the file system.
+Read a merge block from the file system.
 @return TRUE if request was successful, FALSE if fail */
 static
 ibool
 row_merge_write(
 /*============*/
         int             fd,     /*!< in: file descriptor */
-        ulint           offset, /*!< in: offset where to write,
-                                in number of row_merge_block_t elements */
+        ulint           offset, /*!< in: offset where to write */
         const void*     buf)    /*!< in: data */
 {
-        size_t          buf_len = sizeof(row_merge_block_t);
-        ib_uint64_t     ofs = buf_len * (ib_uint64_t) offset;
-        ibool           ret;
-
-        ret = os_file_write("(merge)", OS_FILE_FROM_FD(fd), buf,
-                            (ulint) (ofs & 0xFFFFFFFF),
-                            (ulint) (ofs >> 32),
-                            buf_len);
-
-#ifdef UNIV_DEBUG
-        if (row_merge_print_block_write) {
-                fprintf(stderr, "row_merge_write fd=%d ofs=%lu\n",
-                        fd, (ulong) offset);
-        }
-#endif /* UNIV_DEBUG */
-
-#ifdef POSIX_FADV_DONTNEED
-        /* The block will be needed on the next merge pass,
-        but it can be evicted from the file cache meanwhile. */
-        posix_fadvise(fd, ofs, buf_len, POSIX_FADV_DONTNEED);
-#endif /* POSIX_FADV_DONTNEED */
-
-        return(UNIV_LIKELY(ret));
+        ib_uint64_t     ofs = ((ib_uint64_t) offset)
+                * sizeof(row_merge_block_t);
+
+        return(UNIV_LIKELY(os_file_write("(merge)", OS_FILE_FROM_FD(fd), buf,
+                                         (ulint) (ofs & 0xFFFFFFFF),
+                                         (ulint) (ofs >> 32),
+                                         sizeof(row_merge_block_t))));
 }
 
 /********************************************************************//**
 Read a merge record.
 @return pointer to next record, or NULL on I/O error or end of list */
-static __attribute__((nonnull))
+static
 const byte*
 row_merge_read_rec(
 /*===============*/
@@ -1099,14 +1048,11 @@
                                                 record to be compared */
         const ulint*            offsets1,       /*!< in: first record offsets */
         const ulint*            offsets2,       /*!< in: second record offsets */
-        const dict_index_t*     index,          /*!< in: index */
-        ibool*                  null_eq)        /*!< out: set to TRUE if
-                                                found matching null values */
+        const dict_index_t*     index)          /*!< in: index */
 {
         int     cmp;
 
-        cmp = cmp_rec_rec_simple(mrec1, mrec2, offsets1, offsets2, index,
-                                 null_eq);
+        cmp = cmp_rec_rec_simple(mrec1, mrec2, offsets1, offsets2, index);
 
 #ifdef UNIV_DEBUG
         if (row_merge_print_cmp) {
@@ -1125,12 +1071,12 @@
 Reads clustered index of the table and create temporary files
 containing the index entries for the indexes to be built.
 @return DB_SUCCESS or error */
-static __attribute__((nonnull))
+static
 ulint
 row_merge_read_clustered_index(
 /*===========================*/
         trx_t*                  trx,    /*!< in: transaction */
-        struct TABLE*           table,  /*!< in/out: MySQL table object,
+        TABLE*                  table,  /*!< in/out: MySQL table object,
                                         for reporting erroneous records */
         const dict_table_t*     old_table,/*!< in: table where rows are
                                         read from */
@@ -1230,12 +1176,6 @@
                 in order to release the latch on the old page. */
 
                 if (btr_pcur_is_after_last_on_page(&pcur)) {
-                        if (UNIV_UNLIKELY(trx_is_interrupted(trx))) {
-                                err = DB_INTERRUPTED;
-                                trx->error_key_num = 0;
-                                goto func_exit;
-                        }
-
                         btr_pcur_store_position(&pcur, &mtr);
                         mtr_commit(&mtr);
                         mtr_start(&mtr);
@@ -1275,7 +1215,8 @@
 
                                         if (dfield_is_null(field)) {
                                                 err = DB_PRIMARY_KEY_IS_NULL;
-                                                trx->error_key_num = 0;
+                                                i = 0;
+                                                trx->error_key_num = i;
                                                 goto func_exit;
                                         }
 
@@ -1294,7 +1235,6 @@
 
                         if (UNIV_LIKELY
                             (row && row_merge_buf_add(buf, row, ext))) {
-                                file->n_rec++;
                                 continue;
                         }
 
@@ -1316,6 +1256,7 @@
 
                                         if (dup.n_dup) {
                                                 err = DB_DUPLICATE_KEY;
+err_exit:
                                                 trx->error_key_num = i;
                                                 goto func_exit;
                                         }
@@ -1329,26 +1270,20 @@
                         if (!row_merge_write(file->fd, file->offset++,
                                              block)) {
                                 err = DB_OUT_OF_FILE_SPACE;
-                                trx->error_key_num = i;
-                                goto func_exit;
+                                goto err_exit;
                         }
 
                         UNIV_MEM_INVALID(block[0], sizeof block[0]);
                         merge_buf[i] = row_merge_buf_empty(buf);
 
-                        if (UNIV_LIKELY(row != NULL)) {
-                                /* Try writing the record again, now
-                                that the buffer has been written out
-                                and emptied. */
-
-                                if (UNIV_UNLIKELY
-                                    (!row_merge_buf_add(buf, row, ext))) {
-                                        /* An empty buffer should have enough
-                                        room for at least one record. */
-                                        ut_error;
-                                }
-
-                                file->n_rec++;
+                        /* Try writing the record again, now that
+                        the buffer has been written out and emptied. */
+
+                        if (UNIV_UNLIKELY
+                            (row && !row_merge_buf_add(buf, row, ext))) {
+                                /* An empty buffer should have enough
+                                room for at least one record. */
+                                ut_error;
                         }
                 }
 
@@ -1387,7 +1322,7 @@
                 b2 = row_merge_write_rec(&block[2], &buf[2], b2,        \
                                          of->fd, &of->offset,           \
                                          mrec##N, offsets##N);          \
-                if (UNIV_UNLIKELY(!b2 || ++of->n_rec > file->n_rec)) {  \
+                if (UNIV_UNLIKELY(!b2)) {                               \
                         goto corrupt;                                   \
                 }                                                       \
                 b##N = row_merge_read_rec(&block[N], &buf[N],           \
@@ -1403,14 +1338,14 @@
         } while (0)
 
 /*************************************************************//**
-Merge two blocks of records on disk and write a bigger block.
+Merge two blocks of linked lists on disk and write a bigger block.
 @return DB_SUCCESS or error code */
 static
 ulint
 row_merge_blocks(
 /*=============*/
         const dict_index_t*     index,  /*!< in: index being created */
-        const merge_file_t*     file,   /*!< in: file containing
+        merge_file_t*           file,   /*!< in/out: file containing
                                         index entries */
         row_merge_block_t*      block,  /*!< in/out: 3 buffers */
         ulint*                  foffs0, /*!< in/out: offset of first
@@ -1418,14 +1353,13 @@
         ulint*                  foffs1, /*!< in/out: offset of second
                                         source list in the file */
         merge_file_t*           of,     /*!< in/out: output file */
-        struct TABLE*           table)  /*!< in/out: MySQL table, for
+        TABLE*                  table)  /*!< in/out: MySQL table, for
                                         reporting erroneous key value
                                         if applicable */
 {
         mem_heap_t*     heap;   /*!< memory heap for offsets0, offsets1 */
 
-        mrec_buf_t*     buf;    /*!< buffer for handling
-                                split mrec in block[] */
+        mrec_buf_t      buf[3]; /*!< buffer for handling split mrec in block[] */
         const byte*     b0;     /*!< pointer to block[0] */
         const byte*     b1;     /*!< pointer to block[1] */
         byte*           b2;     /*!< pointer to block[2] */
@@ -1434,20 +1368,7 @@
         ulint*          offsets0;/* offsets of mrec0 */
         ulint*          offsets1;/* offsets of mrec1 */
 
-#ifdef UNIV_DEBUG
-        if (row_merge_print_block) {
-                fprintf(stderr,
-                        "row_merge_blocks fd=%d ofs=%lu + fd=%d ofs=%lu"
-                        " = fd=%d ofs=%lu\n",
-                        file->fd, (ulong) *foffs0,
-                        file->fd, (ulong) *foffs1,
-                        of->fd, (ulong) of->offset);
-        }
-#endif /* UNIV_DEBUG */
-
-        heap = row_merge_heap_create(index, &buf, &offsets0, &offsets1);
-
-        buf = mem_heap_alloc(heap, sizeof(mrec_buf_t) * 3);
+        heap = row_merge_heap_create(index, &offsets0, &offsets1);
 
         /* Write a record and read the next record.  Split the output
         file in two halves, which can be merged on the following pass. */
@@ -1474,13 +1395,11 @@
         }
 
         while (mrec0 && mrec1) {
-                ibool   null_eq = FALSE;
                 switch (row_merge_cmp(mrec0, mrec1,
-                                      offsets0, offsets1, index,
-                                      &null_eq)) {
+                                      offsets0, offsets1, index)) {
                 case 0:
                         if (UNIV_UNLIKELY
-                            (dict_index_is_unique(index) && !null_eq)) {
+                            (dict_index_is_unique(index))) {
                                 innobase_rec_to_mysql(table, mrec0,
                                                       index, offsets0);
                                 mem_heap_free(heap);
@@ -1521,198 +1440,60 @@
 }
 
 /*************************************************************//**
-Copy a block of index entries.
-@return TRUE on success, FALSE on failure */
-static __attribute__((nonnull))
-ibool
-row_merge_blocks_copy(
-/*==================*/
-        const dict_index_t*     index,  /*!< in: index being created */
-        const merge_file_t*     file,   /*!< in: input file */
-        row_merge_block_t*      block,  /*!< in/out: 3 buffers */
-        ulint*                  foffs0, /*!< in/out: input file offset */
-        merge_file_t*           of)     /*!< in/out: output file */
-{
-        mem_heap_t*     heap;   /*!< memory heap for offsets0, offsets1 */
-
-        mrec_buf_t*     buf;    /*!< buffer for handling
-                                split mrec in block[] */
-        const byte*     b0;     /*!< pointer to block[0] */
-        byte*           b2;     /*!< pointer to block[2] */
-        const mrec_t*   mrec0;  /*!< merge rec, points to block[0] */
-        ulint*          offsets0;/* offsets of mrec0 */
-        ulint*          offsets1;/* dummy offsets */
-
-#ifdef UNIV_DEBUG
-        if (row_merge_print_block) {
-                fprintf(stderr,
-                        "row_merge_blocks_copy fd=%d ofs=%lu"
-                        " = fd=%d ofs=%lu\n",
-                        file->fd, (ulong) foffs0,
-                        of->fd, (ulong) of->offset);
-        }
-#endif /* UNIV_DEBUG */
-
-        heap = row_merge_heap_create(index, &buf, &offsets0, &offsets1);
-        buf = mem_heap_alloc(heap, sizeof(mrec_buf_t) * 3);
-
-        /* Write a record and read the next record.  Split the output
-        file in two halves, which can be merged on the following pass. */
-
-        if (!row_merge_read(file->fd, *foffs0, &block[0])) {
-corrupt:
-                mem_heap_free(heap);
-                return(FALSE);
-        }
-
-        b0 = block[0];
-        b2 = block[2];
-
-        b0 = row_merge_read_rec(&block[0], &buf[0], b0, index, file->fd,
-                                foffs0, &mrec0, offsets0);
-        if (UNIV_UNLIKELY(!b0 && mrec0)) {
-
-                goto corrupt;
-        }
-
-        if (mrec0) {
-                /* append all mrec0 to output */
-                for (;;) {
-                        ROW_MERGE_WRITE_GET_NEXT(0, goto done0);
-                }
-        }
-done0:
-
-        /* The file offset points to the beginning of the last page
-        that has been read.  Update it to point to the next block. */
-        (*foffs0)++;
-
-        mem_heap_free(heap);
-        return(row_merge_write_eof(&block[2], b2, of->fd, &of->offset)
-               != NULL);
-}
-
-/*************************************************************//**
 Merge disk files.
 @return DB_SUCCESS or error code */
-static __attribute__((nonnull))
+static
 ulint
 row_merge(
 /*======*/
-        trx_t*                  trx,    /*!< in: transaction */
         const dict_index_t*     index,  /*!< in: index being created */
         merge_file_t*           file,   /*!< in/out: file containing
                                         index entries */
+        ulint                   half,   /*!< in: half the file */
         row_merge_block_t*      block,  /*!< in/out: 3 buffers */
         int*                    tmpfd,  /*!< in/out: temporary file handle */
-        struct TABLE*           table,  /*!< in/out: MySQL table, for
-                                        reporting erroneous key value
-                                        if applicable */
-        ulint*                  num_run,/*!< in/out: Number of runs remain
-                                        to be merged */
-        ulint*                  run_offset) /*!< in/out: Array contains the
-                                        first offset number for each merge
-                                        run */
+        TABLE*                  table)  /*!< in/out: MySQL table, for
                                         reporting erroneous key value
                                         if applicable */
 {
         ulint           foffs0; /*!< first input offset */
         ulint           foffs1; /*!< second input offset */
         ulint           error;  /*!< error code */
         merge_file_t    of;     /*!< output file */
-        const ulint     ihalf   = run_offset[*num_run / 2];
-                                /*!< half the input file */
-        ulint           n_run   = 0;
-                                /*!< num of runs generated from this merge */
-
 
         UNIV_MEM_ASSERT_W(block[0], 3 * sizeof block[0]);
-        ut_ad(ihalf < file->offset);
+        ut_ad(half > 0);
 
         of.fd = *tmpfd;
         of.offset = 0;
-        of.n_rec = 0;
-
-#ifdef POSIX_FADV_SEQUENTIAL
-        /* The input file will be read sequentially, starting from the
-        beginning and the middle.  In Linux, the POSIX_FADV_SEQUENTIAL
-        affects the entire file.  Each block will be read exactly once. */
-        posix_fadvise(file->fd, 0, 0,
-                      POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE);
-#endif /* POSIX_FADV_SEQUENTIAL */
 
         /* Merge blocks to the output file. */
         foffs0 = 0;
-        foffs1 = ihalf;
-
-        UNIV_MEM_INVALID(run_offset, *num_run * sizeof *run_offset);
-
-        for (; foffs0 < ihalf && foffs1 < file->offset; foffs0++, foffs1++) {
-
-                if (UNIV_UNLIKELY(trx_is_interrupted(trx))) {
-                        return(DB_INTERRUPTED);
-                }
-
-                /* Remember the offset number for this run */
-                run_offset[n_run++] = of.offset;
-
+        foffs1 = half;
+
+        for (; foffs0 < half && foffs1 < file->offset; foffs0++, foffs1++) {
                 error = row_merge_blocks(index, file, block,
                                          &foffs0, &foffs1, &of, table);
 
                 if (error != DB_SUCCESS) {
                         return(error);
                 }
-
         }
 
-        /* Copy the last blocks, if there are any. */
-
-        while (foffs0 < ihalf) {
-                if (UNIV_UNLIKELY(trx_is_interrupted(trx))) {
-                        return(DB_INTERRUPTED);
-                }
-
-                /* Remember the offset number for this run */
-                run_offset[n_run++] = of.offset;
-
-                if (!row_merge_blocks_copy(index, file, block, &foffs0, &of)) {
+        /* Copy the last block, if there is one. */
+        while (foffs0 < half) {
+                if (!row_merge_read(file->fd, foffs0++, block)
+                    || !row_merge_write(of.fd, of.offset++, block)) {
                         return(DB_CORRUPTION);
                 }
         }
 
-        ut_ad(foffs0 == ihalf);
-
         while (foffs1 < file->offset) {
-                if (UNIV_UNLIKELY(trx_is_interrupted(trx))) {
-                        return(DB_INTERRUPTED);
-                }
-
-                /* Remember the offset number for this run */
-                run_offset[n_run++] = of.offset;
-
-                if (!row_merge_blocks_copy(index, file, block, &foffs1, &of)) {
+                if (!row_merge_read(file->fd, foffs1++, block)
+                    || !row_merge_write(of.fd, of.offset++, block)) {
                         return(DB_CORRUPTION);
                 }
         }
 
-        ut_ad(foffs1 == file->offset);
-
-        if (UNIV_UNLIKELY(of.n_rec != file->n_rec)) {
-                return(DB_CORRUPTION);
-        }
-
-        ut_ad(n_run <= *num_run);
-
-        *num_run = n_run;
-
-        /* Each run can contain one or more offsets. As merge goes on,
-        the number of runs (to merge) will reduce until we have one
-        single run. So the number of runs will always be smaller than
-        the number of offsets in file */
-        ut_ad((*num_run) <= file->offset);
-
-        /* The number of offsets in output file is always equal or
-        smaller than input file */
-        ut_ad(of.offset <= file->offset);
-
         /* Swap file descriptors for the next pass. */
         *tmpfd = file->fd;
         *file = of;
@@ -1729,55 +1510,31 @@
 ulint
 row_merge_sort(
 /*===========*/
-        trx_t*                  trx,    /*!< in: transaction */
         const dict_index_t*     index,  /*!< in: index being created */
         merge_file_t*           file,   /*!< in/out: file containing
                                         index entries */
         row_merge_block_t*      block,  /*!< in/out: 3 buffers */
         int*                    tmpfd,  /*!< in/out: temporary file handle */
-        struct TABLE*           table)  /*!< in/out: MySQL table, for
+        TABLE*                  table)  /*!< in/out: MySQL table, for
                                         reporting erroneous key value
                                         if applicable */
 {
-        ulint   half = file->offset / 2;
-        ulint   num_runs;
-        ulint*  run_offset;
-        ulint   error = DB_SUCCESS;
-
-        /* Record the number of merge runs we need to perform */
-        num_runs = file->offset;
-
-        /* If num_runs are less than 1, nothing to merge */
-        if (num_runs <= 1) {
-                return(error);
-        }
-
-        /* "run_offset" records each run's first offset number */
-        run_offset = (ulint*) mem_alloc(file->offset * sizeof(ulint));
-
-        /* This tells row_merge() where to start for the first round
-        of merge. */
-        run_offset[half] = half;
-
-        /* The file should always contain at least one byte (the end
-        of file marker).  Thus, it must be at least one block. */
-        ut_ad(file->offset > 0);
-
-        /* Merge the runs until we have one big run */
-        do {
-                error = row_merge(trx, index, file, block, tmpfd,
-                                  table, &num_runs, run_offset);
-
-                UNIV_MEM_ASSERT_RW(run_offset, num_runs * sizeof *run_offset);
+        ulint   blksz;  /*!< block size */
+
+        for (blksz = 1; blksz < file->offset; blksz *= 2) {
+                ulint   half;
+                ulint   error;
+
+                ut_ad(ut_is_2pow(blksz));
+                half = ut_2pow_round((file->offset + (blksz - 1)) / 2, blksz);
+                error = row_merge(index, file, half, block, tmpfd, table);
 
                 if (error != DB_SUCCESS) {
-                        break;
+                        return(error);
                 }
-        } while (num_runs > 1);
-
-        mem_free(run_offset);
-
-        return(error);
+        }
+
+        return(DB_SUCCESS);
 }
 
 /*************************************************************//**
@@ -1813,11 +1570,6 @@
                 (below). */
                 data = btr_rec_copy_externally_stored_field(
                         mrec, offsets, zip_size, i, &len, heap);
-                /* Because we have locked the table, any records
-                written by incomplete transactions must have been
-                rolled back already. There must not be any incomplete
-                BLOB columns. */
-                ut_a(data);
 
                 dfield_set_data(field, data, len);
         }
@@ -1839,6 +1591,7 @@
         int                     fd,     /*!< in: file descriptor */
         row_merge_block_t*      block)  /*!< in/out: file buffer */
 {
+        mrec_buf_t              buf;
         const byte*             b;
         que_thr_t*              thr;
         ins_node_t*             node;
@@ -1857,7 +1610,7 @@
 
         trx->op_info = "inserting index entries";
 
-        graph_heap = mem_heap_create(500 + sizeof(mrec_buf_t));
+        graph_heap = mem_heap_create(500);
         node = ins_node_create(INS_DIRECT, table, graph_heap);
 
         thr = pars_complete_graph_for_exec(node, trx, graph_heap);
@@ -1879,14 +1632,12 @@
         if (!row_merge_read(fd, foffs, block)) {
                 error = DB_CORRUPTION;
         } else {
-                mrec_buf_t*     buf = mem_heap_alloc(graph_heap, sizeof *buf);
-
                 for (;;) {
                         const mrec_t*   mrec;
                         dtuple_t*       dtuple;
                         ulint           n_ext;
 
-                        b = row_merge_read_rec(block, buf, b, index,
+                        b = row_merge_read_rec(block, &buf, b, index,
                                                fd, &foffs, &mrec, offsets);
                         if (UNIV_UNLIKELY(!b)) {
                                 /* End of list, or I/O error */
@@ -2048,21 +1799,15 @@
         static const char str1[] =
                 "PROCEDURE DROP_INDEX_PROC () IS\n"
                 "BEGIN\n"
-                /* Rename the index, so that it will be dropped by
-                row_merge_drop_temp_indexes() at crash recovery
-                if the server crashes before this trx is committed. */
-                "UPDATE SYS_INDEXES SET NAME=CONCAT('"
-                TEMP_INDEX_PREFIX_STR "', NAME) WHERE ID = :indexid;\n"
-                "COMMIT WORK;\n"
-                /* Drop the field definitions of the index. */
                 "DELETE FROM SYS_FIELDS WHERE INDEX_ID = :indexid;\n"
-                /* Drop the index definition and the B-tree. */
-                "DELETE FROM SYS_INDEXES WHERE ID = :indexid;\n"
+                "DELETE FROM SYS_INDEXES WHERE ID = :indexid\n"
+                "               AND TABLE_ID = :tableid;\n"
                 "END;\n";
 
         ut_ad(index && table && trx);
 
-        pars_info_add_ull_literal(info, "indexid", index->id);
+        pars_info_add_dulint_literal(info, "indexid", index->id);
+        pars_info_add_dulint_literal(info, "tableid", table->id);
 
         trx_start_if_not_started(trx);
         trx->op_info = "dropping index";
@@ -2111,83 +1856,48 @@
 /*=============================*/
 {
         trx_t*          trx;
-        btr_pcur_t      pcur;
-        mtr_t           mtr;
+        ulint           err;
 
-        /* Load the table definitions that contain partially defined
-        indexes, so that the data dictionary information can be checked
-        when accessing the tablename.ibd files. */
+        /* We use the private SQL parser of Innobase to generate the
+        query graphs needed in deleting the dictionary data from system
+        tables in Innobase. Deleting a row from SYS_INDEXES table also
+        frees the file segments of the B-tree associated with the index. */
+        static const char drop_temp_indexes[] =
+                "PROCEDURE DROP_TEMP_INDEXES_PROC () IS\n"
+                "indexid CHAR;\n"
+                "DECLARE CURSOR c IS SELECT ID FROM SYS_INDEXES\n"
+                "WHERE SUBSTR(NAME,0,1)='" TEMP_INDEX_PREFIX_STR "';\n"
+                "BEGIN\n"
+                "\tOPEN c;\n"
+                "\tWHILE 1=1 LOOP\n"
+                "\t\tFETCH c INTO indexid;\n"
+                "\t\tIF (SQL % NOTFOUND) THEN\n"
+                "\t\t\tEXIT;\n"
+                "\t\tEND IF;\n"
+                "\t\tDELETE FROM SYS_FIELDS WHERE INDEX_ID = indexid;\n"
+                "\t\tDELETE FROM SYS_INDEXES WHERE ID = indexid;\n"
+                "\tEND LOOP;\n"
+                "\tCLOSE c;\n"
+                "\tCOMMIT WORK;\n"
                "END;\n";
 
         trx = trx_allocate_for_background();
         trx->op_info = "dropping partially created indexes";
         row_mysql_lock_data_dictionary(trx);
 
-        mtr_start(&mtr);
-
-        btr_pcur_open_at_index_side(
-                TRUE,
-                dict_table_get_first_index(dict_sys->sys_indexes),
-                BTR_SEARCH_LEAF, &pcur, TRUE, &mtr);
-
-        for (;;) {
-                const rec_t*    rec;
-                const byte*     field;
-                ulint           len;
-                table_id_t      table_id;
-                dict_table_t*   table;
-
-                btr_pcur_move_to_next_user_rec(&pcur, &mtr);
-
-                if (!btr_pcur_is_on_user_rec(&pcur)) {
-                        break;
-                }
-
-                rec = btr_pcur_get_rec(&pcur);
-                field = rec_get_nth_field_old(rec, DICT_SYS_INDEXES_NAME_FIELD,
-                                              &len);
-                if (len == UNIV_SQL_NULL || len == 0
-                    || (char) *field != TEMP_INDEX_PREFIX) {
-                        continue;
-                }
-
-                /* This is a temporary index. */
-
-                field = rec_get_nth_field_old(rec, 0/*TABLE_ID*/, &len);
-                if (len != 8) {
-                        /* Corrupted TABLE_ID */
-                        continue;
-                }
-
-                table_id = mach_read_from_8(field);
-
-                btr_pcur_store_position(&pcur, &mtr);
-                btr_pcur_commit_specify_mtr(&pcur, &mtr);
-
-                table = dict_table_get_on_id_low(table_id);
-
-                if (table) {
-                        dict_index_t*   index;
-                        dict_index_t*   next_index;
-
-                        for (index = dict_table_get_first_index(table);
-                             index; index = next_index) {
-
-                                next_index = dict_table_get_next_index(index);
-
-                                if (*index->name == TEMP_INDEX_PREFIX) {
-                                        row_merge_drop_index(index, table, trx);
-                                        trx_commit_for_mysql(trx);
-                                }
-                        }
-                }
-
-                mtr_start(&mtr);
-                btr_pcur_restore_position(BTR_SEARCH_LEAF,
-                                          &pcur, &mtr);
-        }
-
-        btr_pcur_close(&pcur);
-        mtr_commit(&mtr);
+        /* Incomplete transactions may be holding some locks on the
+        data dictionary tables.  However, they should never have been
+        able to lock the records corresponding to the partially
+        created indexes that we are attempting to delete, because the
+        table was locked when the indexes were being created.  We will
+        drop the partially created indexes before the rollback of
+        incomplete transactions is initiated.  Thus, this should not
+        interfere with the incomplete transactions. */
+        trx->isolation_level = TRX_ISO_READ_UNCOMMITTED;
+        pars_info_t *info = pars_info_create();
+        err = que_eval_sql(info, drop_temp_indexes, FALSE, trx);
+        ut_a(err == DB_SUCCESS);
+
         row_mysql_unlock_data_dictionary(trx);
         trx_free_for_background(trx);
 }
@@ -2200,23 +1910,8 @@
 /*==================*/
         merge_file_t*   merge_file)     /*!< out: merge file structure */
 {
-#ifdef UNIV_PFS_IO
-        /* This temp file open does not go through normal
-        file APIs, add instrumentation to register with
-        performance schema */
-        struct PSI_file_locker* locker = NULL;
-        PSI_file_locker_state   state;
-        register_pfs_file_open_begin(&state, locker, innodb_file_temp_key,
-                                     PSI_FILE_OPEN,
-                                     "Innodb Merge Temp File",
-                                     __FILE__, __LINE__);
-#endif
         merge_file->fd = innobase_mysql_tmpfile();
         merge_file->offset = 0;
-        merge_file->n_rec = 0;
-#ifdef UNIV_PFS_IO
-        register_pfs_file_open_end(locker, merge_file->fd);
-#endif
 }
 
 /*********************************************************************//**
@@ -2227,20 +1922,10 @@
 /*===================*/
         merge_file_t*   merge_file)     /*!< out: merge file structure */
 {
-#ifdef UNIV_PFS_IO
-        struct PSI_file_locker* locker = NULL;
-        PSI_file_locker_state   state;
-        register_pfs_file_io_begin(&state, locker, merge_file->fd, 0, PSI_FILE_CLOSE,
-                                   __FILE__, __LINE__);
-#endif
         if (merge_file->fd != -1) {
                 close(merge_file->fd);
                 merge_file->fd = -1;
         }
-
-#ifdef UNIV_PFS_IO
-        register_pfs_file_io_end(locker, 0);
-#endif
 }
 
 /*********************************************************************//**
@@ -2362,7 +2047,7 @@
 
         trx->op_info = "renaming indexes";
 
-        pars_info_add_ull_literal(info, "tableid", table->id);
+        pars_info_add_dulint_literal(info, "tableid", table->id);
 
         err = que_eval_sql(info, rename_indexes, FALSE, trx);
 
@@ -2399,7 +2084,7 @@
 {
         ulint           err     = DB_ERROR;
         pars_info_t*    info;
-        char            old_name[MAX_TABLE_NAME_LEN + 1];
+        const char*     old_name= old_table->name;
 
         ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
         ut_ad(old_table != new_table);
@@ -2407,28 +2092,6 @@
 
         ut_a(trx->dict_operation_lock_mode == RW_X_LATCH);
 
-        /* store the old/current name to an automatic variable */
-        if (strlen(old_table->name) + 1 <= sizeof(old_name)) {
-                memcpy(old_name, old_table->name, strlen(old_table->name) + 1);
-        } else {
-                ut_print_timestamp(stderr);
-                fprintf(stderr, "InnoDB: too long table name: '%s', "
-                        "max length is %d\n", old_table->name,
-                        MAX_TABLE_NAME_LEN);
-                ut_error;
-        }
-
-        /* store the old/current name to an automatic variable */
-        if (strlen(old_table->name) + 1 <= sizeof(old_name)) {
-                memcpy(old_name, old_table->name, strlen(old_table->name) + 1);
-        } else {
-                ut_print_timestamp(stderr);
-                fprintf(stderr, "InnoDB: too long table name: '%s', "
-                        "max length is %d\n", old_table->name,
-                        MAX_TABLE_NAME_LEN);
-                ut_error;
-        }
-
         trx->op_info = "renaming tables";
 
         /* We use the private SQL parser of Innobase to generate the query
@@ -2464,12 +2127,12 @@
                 goto err_exit;
         }
 
-        err = dict_load_foreigns(old_name, FALSE, TRUE);
+        err = dict_load_foreigns(old_name, TRUE);
 
         if (err != DB_SUCCESS) {
 err_exit:
                 trx->error_state = DB_SUCCESS;
-                trx_general_rollback_for_mysql(trx, NULL);
+                trx_general_rollback_for_mysql(trx, FALSE, NULL);
                 trx->error_state = DB_SUCCESS;
         }
 
@@ -2561,7 +2224,8 @@
                 /* Note the id of the transaction that created this
                 index, we use it to restrict readers from accessing
                 this index, to ensure read consistency. */
-                index->trx_id = trx->id;
+                index->trx_id = (ib_uint64_t)
+                        ut_conv_dulint_to_longlong(trx->id);
         } else {
                 index = NULL;
         }
@@ -2578,8 +2242,10 @@
         const trx_t*            trx,    /*!< in: transaction */
         const dict_index_t*     index)  /*!< in: index to check */
 {
-        return(!trx->read_view
-               || read_view_sees_trx_id(trx->read_view, index->trx_id));
+        return(!trx->read_view || read_view_sees_trx_id(
+                       trx->read_view,
+                       ut_dulint_create((ulint) (index->trx_id >> 32),
+                                        (ulint) index->trx_id & 0xFFFFFFFF)));
 }
 
 /*********************************************************************//**
@@ -2615,7 +2281,7 @@
                                         unless creating a PRIMARY KEY */
         dict_index_t**  indexes,        /*!< in: indexes to be created */
         ulint           n_indexes,      /*!< in: size of indexes[] */
-        TABLE*  table)          /*!< in/out: MySQL table, for
+        TABLE*          table)          /*!< in/out: MySQL table, for
                                         reporting erroneous key value
                                         if applicable */
 {
@@ -2668,7 +2334,7 @@
         sorting and inserting. */
 
         for (i = 0; i < n_indexes; i++) {
-                error = row_merge_sort(trx, indexes[i], &merge_files[i],
+                error = row_merge_sort(indexes[i], &merge_files[i],
                                        block, &tmpfd, table);
 
                 if (error == DB_SUCCESS) {