~drizzle-trunk/drizzle/development

Viewing changes to plugin/innobase/row/row0merge.c

Merge Revision revid:marko.makela@oracle.com-20100601140355-u3kxl0yl0ljl5tx9 from MySQL InnoDB

Original revid:marko.makela@oracle.com-20100601140355-u3kxl0yl0ljl5tx9

Original Authors: Marko Mäkelä <marko.makela@oracle.com>
Original commit message:
Merge a change from mysql-5.1-innodb:

  ------------------------------------------------------------
  revno: 3491
  revision-id: marko.makela@oracle.com-20100601134335-ccthwwru23kn09qw
  parent: marko.makela@oracle.com-20100601120751-1uq7bbta5n7ts0qr
  committer: Marko Mäkelä <marko.makela@oracle.com>
  branch nick: 5.1-innodb
  timestamp: Tue 2010-06-01 16:43:35 +0300
  message:
    Bug#48197: Concurrent rw_lock_free may cause assertion failure

    rw_lock_t: Remove magic_n unless UNIV_DEBUG is defined.
    rw_lock_free(): Invalidate magic_n only after removing from rw_lock_list.
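
The fix described above is purely an ordering change inside rw_lock_free(). Below is a minimal standalone sketch of the corrected order, with stand-in types rather than the real sync0rw definitions (upstream, rw_lock_list is a mutex-protected global and magic_n exists only under UNIV_DEBUG):

    #include <assert.h>
    #include <stddef.h>

    #define RW_LOCK_MAGIC_N 22643

    struct rw_lock {
            unsigned        magic_n;        /* marks a valid lock */
            struct rw_lock* prev;
            struct rw_lock* next;           /* doubly linked rw_lock_list */
    };

    static struct rw_lock* rw_lock_list;

    static void rw_lock_free(struct rw_lock* lock)
    {
            assert(lock->magic_n == RW_LOCK_MAGIC_N);

            /* Unlink first, while magic_n is still valid, so that a
            concurrent thread walking rw_lock_list and asserting on
            magic_n can never reach a lock that has already been
            invalidated.  The bug did these two steps in the
            opposite order. */
            if (lock->prev) {
                    lock->prev->next = lock->next;
            } else {
                    rw_lock_list = lock->next;
            }
            if (lock->next) {
                    lock->next->prev = lock->prev;
            }

            /* Only now poison the magic number. */
            lock->magic_n = 0;
    }

    int main(void)
    {
            struct rw_lock  lock = { RW_LOCK_MAGIC_N, NULL, NULL };

            rw_lock_list = &lock;
            rw_lock_free(&lock);
            return rw_lock_list == NULL ? 0 : 1;
    }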

--- plugin/innobase/row/row0merge.c	(removed)
+++ plugin/innobase/row/row0merge.c	(added)
@@ -1,6 +1,6 @@
 /*****************************************************************************
 
-Copyright (C) 2005, 2010, Innobase Oy. All Rights Reserved.
+Copyright (c) 2005, 2010, Innobase Oy. All Rights Reserved.
 
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -187,12 +187,12 @@
         ut_ad(max_tuples <= sizeof(row_merge_block_t));
         ut_ad(max_tuples < buf_size);
 
-        buf = static_cast<row_merge_buf_t *>(mem_heap_zalloc(heap, buf_size));
+        buf = mem_heap_zalloc(heap, buf_size);
         buf->heap = heap;
         buf->index = index;
         buf->max_tuples = max_tuples;
-        buf->tuples = static_cast<const dfield_t **>(mem_heap_alloc(heap,
-                                     2 * max_tuples * sizeof *buf->tuples));
+        buf->tuples = mem_heap_alloc(heap,
+                                     2 * max_tuples * sizeof *buf->tuples);
         buf->tmp_tuples = buf->tuples + max_tuples;
 
         return(buf);
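
Nearly all of the remaining hunks drop a static_cast<> wrapper around mem_heap_alloc()/mem_alloc()-style calls. That is a C-versus-C++ difference, not a logic change: these allocators return void*, which converts implicitly to any object pointer type in C but not in C++, and the removed side evidently builds this file as C++. A minimal illustration, independent of InnoDB:

    #include <stdlib.h>

    int main(void)
    {
            /* Valid C: void* converts implicitly to int*.  Compiled
            as C++, this line is an error without a cast, which is
            why the C++ build carried static_cast<>. */
            int*    p = malloc(10 * sizeof *p);

            free(p);
            return 0;
    }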
@@ -287,7 +287,7 @@
 
         n_fields = dict_index_get_n_fields(index);
 
-        entry = static_cast<dfield_t *>(mem_heap_alloc(buf->heap, n_fields * sizeof *entry));
+        entry = mem_heap_alloc(buf->heap, n_fields * sizeof *entry);
         buf->tuples[buf->n_tuples] = entry;
         field = entry;
 
@@ -314,23 +314,23 @@
                 } else if (UNIV_LIKELY(!ext)) {
                 } else if (dict_index_is_clust(index)) {
                         /* Flag externally stored fields. */
-                        const byte*     row_buf = row_ext_lookup(ext, col_no,
+                        const byte*     buf = row_ext_lookup(ext, col_no,
                                                              &len);
-                        if (UNIV_LIKELY_NULL(row_buf)) {
-                                ut_a(row_buf != field_ref_zero);
+                        if (UNIV_LIKELY_NULL(buf)) {
+                                ut_a(buf != field_ref_zero);
                                 if (i < dict_index_get_n_unique(index)) {
-                                        dfield_set_data(field, row_buf, len);
+                                        dfield_set_data(field, buf, len);
                                 } else {
                                         dfield_set_ext(field);
                                         len = dfield_get_len(field);
                                 }
                         }
                 } else {
-                        const byte*     row_buf = row_ext_lookup(ext, col_no,
+                        const byte*     buf = row_ext_lookup(ext, col_no,
                                                              &len);
-                        if (UNIV_LIKELY_NULL(row_buf)) {
-                                ut_a(row_buf != field_ref_zero);
-                                dfield_set_data(field, row_buf, len);
+                        if (UNIV_LIKELY_NULL(buf)) {
+                                ut_a(buf != field_ref_zero);
+                                dfield_set_data(field, buf, len);
                         }
                 }
 
@@ -339,9 +339,9 @@
                 if (ifield->prefix_len) {
                         len = dtype_get_at_most_n_mbchars(
                                 col->prtype,
-                                col->mbminmaxlen,
+                                col->mbminlen, col->mbmaxlen,
                                 ifield->prefix_len,
-                                len, static_cast<const char *>(dfield_get_data(field)));
+                                len, dfield_get_data(field));
                         dfield_set_len(field, len);
                 }
 
@@ -415,7 +415,7 @@
 /** Structure for reporting duplicate records. */
 struct row_merge_dup_struct {
         const dict_index_t*     index;          /*!< index being sorted */
-        TABLE*          table;          /*!< MySQL table object */
+        struct TABLE*           table;          /*!< MySQL table object */
         ulint                   n_dup;          /*!< number of duplicates */
 };
 
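
The TABLE* to struct TABLE* change here (and in the function signatures further down) is what lets the code compile as plain C: without a typedef in scope, the tag must be written with the struct keyword, and an incomplete, forward-declared type suffices for declaring pointers. A minimal sketch; row_merge_dup and main() are illustrative only:

    #include <stdio.h>

    struct TABLE;                            /* forward declaration only */

    struct row_merge_dup {
            struct TABLE*   table;           /* opaque MySQL table handle */
            unsigned long   n_dup;           /* number of duplicates */
    };

    int main(void)
    {
            /* A pointer to an incomplete type can be declared and
            stored; only dereferencing would need the definition. */
            struct row_merge_dup    dup = { NULL, 0 };

            printf("%lu duplicates\n", dup.n_dup);
            return 0;
    }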
@@ -452,7 +452,7 @@
                                * sizeof *offsets
                                + sizeof *buf);
 
-        buf = static_cast<mrec_buf_t *>(mem_heap_alloc(heap, sizeof *buf));
+        buf = mem_heap_alloc(heap, sizeof *buf);
 
         tuple = dtuple_from_fields(&tuple_store, entry, n_fields);
         n_ext = dict_index_is_clust(index) ? dtuple_get_n_ext(tuple) : 0;
@@ -650,9 +650,9 @@
         mem_heap_t*     heap    = mem_heap_create(2 * i * sizeof **offsets1
                                                   + 3 * sizeof **buf);
 
-        *buf = static_cast<mrec_buf_t*>(mem_heap_alloc(heap, 3 * sizeof **buf));
-        *offsets1 = static_cast<ulint*>(mem_heap_alloc(heap, i * sizeof **offsets1));
-        *offsets2 = static_cast<ulint*>(mem_heap_alloc(heap, i * sizeof **offsets2));
+        *buf = mem_heap_alloc(heap, 3 * sizeof **buf);
+        *offsets1 = mem_heap_alloc(heap, i * sizeof **offsets1);
+        *offsets2 = mem_heap_alloc(heap, i * sizeof **offsets2);
 
         (*offsets1)[0] = (*offsets2)[0] = i;
         (*offsets1)[1] = (*offsets2)[1] = dict_index_get_n_fields(index);
@@ -675,7 +675,7 @@
         dict_index_t*   index;
         const char**    column_names;
 
-        column_names = static_cast<const char **>(mem_alloc(index_def->n_fields * sizeof *column_names));
+        column_names = mem_alloc(index_def->n_fields * sizeof *column_names);
 
         for (i = 0; i < index_def->n_fields; ++i) {
                 column_names[i] = index_def->fields[i].field_name;
@@ -1130,7 +1130,7 @@
 row_merge_read_clustered_index(
 /*===========================*/
         trx_t*                  trx,    /*!< in: transaction */
-        TABLE*          table,  /*!< in/out: MySQL table object,
+        struct TABLE*           table,  /*!< in/out: MySQL table object,
                                         for reporting erroneous records */
         const dict_table_t*     old_table,/*!< in: table where rows are
                                         read from */
@@ -1165,7 +1165,7 @@
 
         /* Create and initialize memory for record buffers */
 
-        merge_buf = static_cast<row_merge_buf_t **>(mem_alloc(n_index * sizeof *merge_buf));
+        merge_buf = mem_alloc(n_index * sizeof *merge_buf);
 
         for (i = 0; i < n_index; i++) {
                 merge_buf[i] = row_merge_buf_create(index[i]);
@@ -1192,7 +1192,7 @@
 
         ut_a(n_cols == dict_table_get_n_cols(new_table));
 
-        nonnull = static_cast<ulint*>(mem_alloc(n_cols * sizeof *nonnull));
+        nonnull = mem_alloc(n_cols * sizeof *nonnull);
 
                 for (i = 0; i < n_cols; i++) {
                         if (dict_table_get_nth_col(old_table, i)->prtype
@@ -1290,7 +1290,7 @@
                 for (i = 0; i < n_index; i++) {
                         row_merge_buf_t*        buf     = merge_buf[i];
                         merge_file_t*           file    = &files[i];
-                        const dict_index_t*     buf_index       = buf->index;
+                        const dict_index_t*     index   = buf->index;
 
                         if (UNIV_LIKELY
                             (row && row_merge_buf_add(buf, row, ext))) {
@@ -1306,7 +1306,7 @@
                         Sort them and write to disk. */
 
                         if (buf->n_tuples) {
-                                if (dict_index_is_unique(buf_index)) {
+                                if (dict_index_is_unique(index)) {
                                         row_merge_dup_t dup;
                                         dup.index = buf->index;
                                         dup.table = table;
@@ -1418,7 +1418,7 @@
         ulint*                  foffs1, /*!< in/out: offset of second
                                         source list in the file */
         merge_file_t*           of,     /*!< in/out: output file */
-        TABLE*          table)  /*!< in/out: MySQL table, for
+        struct TABLE*           table)  /*!< in/out: MySQL table, for
                                         reporting erroneous key value
                                         if applicable */
 {
@@ -1447,7 +1447,7 @@
 
         heap = row_merge_heap_create(index, &buf, &offsets0, &offsets1);
 
-        buf = static_cast<mrec_buf_t *>(mem_heap_alloc(heap, sizeof(mrec_buf_t) * 3));
+        buf = mem_heap_alloc(heap, sizeof(mrec_buf_t) * 3);
 
         /* Write a record and read the next record.  Split the output
         file in two halves, which can be merged on the following pass. */
@@ -1554,7 +1554,7 @@
 #endif /* UNIV_DEBUG */
 
         heap = row_merge_heap_create(index, &buf, &offsets0, &offsets1);
-        buf = static_cast<mrec_buf_t *>(mem_heap_alloc(heap, sizeof(mrec_buf_t) * 3));
+        buf = mem_heap_alloc(heap, sizeof(mrec_buf_t) * 3);
 
         /* Write a record and read the next record.  Split the output
         file in two halves, which can be merged on the following pass. */
@@ -1603,26 +1603,20 @@
         const dict_index_t*     index,  /*!< in: index being created */
         merge_file_t*           file,   /*!< in/out: file containing
                                         index entries */
+        ulint*                  half,   /*!< in/out: half the file */
         row_merge_block_t*      block,  /*!< in/out: 3 buffers */
         int*                    tmpfd,  /*!< in/out: temporary file handle */
-        TABLE*          table,  /*!< in/out: MySQL table, for
-                                        reporting erroneous key value
-                                        if applicable */
-        ulint*                  num_run,/*!< in/out: Number of runs remain
-                                        to be merged */
-        ulint*                  run_offset) /*!< in/out: Array contains the
-                                        first offset number for each merge
-                                        run */
+        struct TABLE*           table)  /*!< in/out: MySQL table, for
+                                        reporting erroneous key value
+                                        if applicable */
 {
         ulint           foffs0; /*!< first input offset */
         ulint           foffs1; /*!< second input offset */
         ulint           error;  /*!< error code */
         merge_file_t    of;     /*!< output file */
-        const ulint     ihalf   = run_offset[*num_run / 2];
+        const ulint     ihalf   = *half;
                                 /*!< half the input file */
-        ulint           n_run   = 0;
-                                /*!< num of runs generated from this merge */
-
+        ulint           ohalf;  /*!< half the output file */
 
         UNIV_MEM_ASSERT_W(block[0], 3 * sizeof block[0]);
         ut_ad(ihalf < file->offset);
@@ -1640,20 +1634,17 @@
 #endif /* POSIX_FADV_SEQUENTIAL */
 
         /* Merge blocks to the output file. */
+        ohalf = 0;
         foffs0 = 0;
         foffs1 = ihalf;
 
-        UNIV_MEM_INVALID(run_offset, *num_run * sizeof *run_offset);
-
         for (; foffs0 < ihalf && foffs1 < file->offset; foffs0++, foffs1++) {
+                ulint   ahalf;  /*!< arithmetic half the input file */
 
                 if (UNIV_UNLIKELY(trx_is_interrupted(trx))) {
                         return(DB_INTERRUPTED);
                 }
 
-                /* Remember the offset number for this run */
-                run_offset[n_run++] = of.offset;
-
                 error = row_merge_blocks(index, file, block,
                                          &foffs0, &foffs1, &of, table);
 
@@ -1661,6 +1652,21 @@
                         return(error);
                 }
 
+                /* Record the offset of the output file when
+                approximately half the output has been generated.  In
+                this way, the next invocation of row_merge() will
+                spend most of the time in this loop.  The initial
+                estimate is ohalf==0. */
+                ahalf = file->offset / 2;
+                ut_ad(ohalf <= of.offset);
+
+                /* Improve the estimate until reaching half the input
+                file size, or we can not get any closer to it.  All
+                comparands should be non-negative when !(ohalf < ahalf)
+                because ohalf <= of.offset. */
+                if (ohalf < ahalf || of.offset - ahalf < ohalf - ahalf) {
+                        ohalf = of.offset;
+                }
         }
 
         /* Copy the last blocks, if there are any. */
@@ -1670,9 +1676,6 @@
                         return(DB_INTERRUPTED);
                 }
 
-                /* Remember the offset number for this run */
-                run_offset[n_run++] = of.offset;
-
                 if (!row_merge_blocks_copy(index, file, block, &foffs0, &of)) {
                         return(DB_CORRUPTION);
                 }
@@ -1685,9 +1688,6 @@
                         return(DB_INTERRUPTED);
                 }
 
-                /* Remember the offset number for this run */
-                run_offset[n_run++] = of.offset;
-
                 if (!row_merge_blocks_copy(index, file, block, &foffs1, &of)) {
                         return(DB_CORRUPTION);
                 }
@@ -1699,23 +1699,10 @@
                 return(DB_CORRUPTION);
         }
 
-        ut_ad(n_run <= *num_run);
-
-        *num_run = n_run;
-
-        /* Each run can contain one or more offsets. As merge goes on,
-        the number of runs (to merge) will reduce until we have one
-        single run. So the number of runs will always be smaller than
-        the number of offsets in file */
-        ut_ad((*num_run) <= file->offset);
-
-        /* The number of offsets in output file is always equal or
-        smaller than input file */
-        ut_ad(of.offset <= file->offset);
-
         /* Swap file descriptors for the next pass. */
         *tmpfd = file->fd;
         *file = of;
+        *half = ohalf;
 
         UNIV_MEM_INVALID(block[0], 3 * sizeof block[0]);
 
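
The added side replaces the run_offset bookkeeping with the older ohalf estimate: while writing the output file, row_merge() records the output offset that lies closest to the arithmetic midpoint, so the next pass can again split its input roughly in half. A standalone sketch of just that update rule, with plain unsigned longs standing in for ulint:

    #include <assert.h>
    #include <stdio.h>

    /* Mirror of "if (ohalf < ahalf || of.offset - ahalf < ohalf - ahalf)":
       keep raising the estimate until it passes the midpoint, then only
       accept output offsets that land closer above the midpoint.  Both
       subtractions are non-negative when the first test fails, because
       ohalf <= offset. */
    static unsigned long
    update_half_estimate(
            unsigned long   ohalf,  /* current midpoint estimate */
            unsigned long   ahalf,  /* arithmetic half of the input */
            unsigned long   offset) /* current end of the output file */
    {
            assert(ohalf <= offset);

            if (ohalf < ahalf || offset - ahalf < ohalf - ahalf) {
                    ohalf = offset;
            }
            return(ohalf);
    }

    int main(void)
    {
            unsigned long   ohalf = 0;

            /* Hypothetical output offsets recorded as the file grows. */
            ohalf = update_half_estimate(ohalf, 50, 30);    /* -> 30 */
            ohalf = update_half_estimate(ohalf, 50, 55);    /* -> 55 */
            ohalf = update_half_estimate(ohalf, 50, 80);    /* stays 55 */
            printf("estimate: %lu\n", ohalf);
            return 0;
    }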
@@ -1735,49 +1722,32 @@
                                         index entries */
         row_merge_block_t*      block,  /*!< in/out: 3 buffers */
         int*                    tmpfd,  /*!< in/out: temporary file handle */
-        TABLE*          table)  /*!< in/out: MySQL table, for
+        struct TABLE*           table)  /*!< in/out: MySQL table, for
                                         reporting erroneous key value
                                         if applicable */
 {
         ulint   half = file->offset / 2;
-        ulint   num_runs;
-        ulint*  run_offset;
-        ulint   error = DB_SUCCESS;
-
-        /* Record the number of merge runs we need to perform */
-        num_runs = file->offset;
-
-        /* If num_runs are less than 1, nothing to merge */
-        if (num_runs <= 1) {
-                return(error);
-        }
-
-        /* "run_offset" records each run's first offset number */
-        run_offset = (ulint*) mem_alloc(file->offset * sizeof(ulint));
-
-        /* This tells row_merge() where to start for the first round
-        of merge. */
-        run_offset[half] = half;
 
         /* The file should always contain at least one byte (the end
         of file marker).  Thus, it must be at least one block. */
         ut_ad(file->offset > 0);
 
-        /* Merge the runs until we have one big run */
         do {
-                error = row_merge(trx, index, file, block, tmpfd,
-                                  table, &num_runs, run_offset);
+                ulint   error;
 
-                UNIV_MEM_ASSERT_RW(run_offset, num_runs * sizeof *run_offset);
+                error = row_merge(trx, index, file, &half,
+                                  block, tmpfd, table);
 
                 if (error != DB_SUCCESS) {
-                        break;
+                        return(error);
                 }
-        } while (num_runs > 1);
-
-        mem_free(run_offset);
-
-        return(error);
+
+                /* half > 0 should hold except when the file consists
+                of one block.  No need to merge further then. */
+                ut_ad(half > 0 || file->offset == 1);
+        } while (half < file->offset && half > 0);
+
+        return(DB_SUCCESS);
 }
 
 /*************************************************************//**
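
Either way the bookkeeping is done, row_merge_sort() performs on the order of log2(n) passes: each row_merge() call merges pairs of sorted runs, roughly doubling run length and halving the run count until one sorted run remains. A toy model of the pass count, with a hypothetical run count:

    #include <stdio.h>

    int main(void)
    {
            unsigned long   runs = 1000;    /* hypothetical initial runs */
            unsigned        passes = 0;

            while (runs > 1) {
                    runs = (runs + 1) / 2;  /* one pairwise merge pass */
                    passes++;
            }
            printf("passes: %u\n", passes); /* prints 10 */
            return 0;
    }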
@@ -1813,11 +1783,6 @@
                 (below). */
                 data = btr_rec_copy_externally_stored_field(
                         mrec, offsets, zip_size, i, &len, heap);
-                /* Because we have locked the table, any records
-                written by incomplete transactions must have been
-                rolled back already. There must not be any incomplete
-                BLOB columns. */
-                ut_a(data);
 
                 dfield_set_data(field, data, len);
         }
@@ -1869,7 +1834,7 @@
         {
                 ulint i = 1 + REC_OFFS_HEADER_SIZE
                         + dict_index_get_n_fields(index);
-                offsets = static_cast<ulint *>(mem_heap_alloc(graph_heap, i * sizeof *offsets));
+                offsets = mem_heap_alloc(graph_heap, i * sizeof *offsets);
                 offsets[0] = i;
                 offsets[1] = dict_index_get_n_fields(index);
         }
@@ -1879,7 +1844,7 @@
         if (!row_merge_read(fd, foffs, block)) {
                 error = DB_CORRUPTION;
         } else {
-                mrec_buf_t*     buf = static_cast<mrec_buf_t *>(mem_heap_alloc(graph_heap, sizeof *buf));
+                mrec_buf_t*     buf = mem_heap_alloc(graph_heap, sizeof *buf);
 
                 for (;;) {
                         const mrec_t*   mrec;
@@ -1977,7 +1942,7 @@
         /* We use the select query graph as the dummy graph needed
         in the lock module call */
 
-        thr = que_fork_get_first_thr(static_cast<que_fork_t *>(que_node_get_parent(thr)));
+        thr = que_fork_get_first_thr(que_node_get_parent(thr));
         que_thr_move_to_run_state_for_mysql(thr, trx);
 
 run_again:
@@ -2007,7 +1972,7 @@
                         que_node_t*     parent;
 
                         parent = que_node_get_parent(thr);
-                        run_thr = que_fork_start_command(static_cast<que_fork_t *>(parent));
+                        run_thr = que_fork_start_command(parent);
 
                         ut_a(run_thr == thr);
 
@@ -2062,7 +2027,7 @@
 
         ut_ad(index && table && trx);
 
-        pars_info_add_ull_literal(info, "indexid", index->id);
+        pars_info_add_dulint_literal(info, "indexid", index->id);
 
         trx_start_if_not_started(trx);
         trx->op_info = "dropping index";
@@ -2076,7 +2041,7 @@
         /* Replace this index with another equivalent index for all
         foreign key constraints on this table where this index is used */
 
-        dict_table_replace_index_in_foreign_list(table, index, trx);
+        dict_table_replace_index_in_foreign_list(table, index);
         dict_index_remove_from_cache(table, index);
 
         trx->op_info = "";
@@ -2133,7 +2098,7 @@
                 const rec_t*    rec;
                 const byte*     field;
                 ulint           len;
-                table_id_t      table_id;
+                dulint          table_id;
                 dict_table_t*   table;
 
                 btr_pcur_move_to_next_user_rec(&pcur, &mtr);
@@ -2204,9 +2169,8 @@
         /* This temp file open does not go through normal
         file APIs, add instrumentation to register with
         performance schema */
-        struct PSI_file_locker* locker = NULL;
-        PSI_file_locker_state   state;
-        register_pfs_file_open_begin(&state, locker, innodb_file_temp_key,
+        struct PSI_file_locker* locker = NULL;
+        register_pfs_file_open_begin(locker, innodb_file_temp_key,
                                      PSI_FILE_OPEN,
                                      "Innodb Merge Temp File",
                                      __FILE__, __LINE__);
@@ -2228,9 +2192,8 @@
         merge_file_t*   merge_file)     /*!< out: merge file structure */
 {
 #ifdef UNIV_PFS_IO
-        struct PSI_file_locker* locker = NULL;
-        PSI_file_locker_state   state;
-        register_pfs_file_io_begin(&state, locker, merge_file->fd, 0, PSI_FILE_CLOSE,
+        struct PSI_file_locker* locker = NULL;
+        register_pfs_file_io_begin(locker, merge_file->fd, 0, PSI_FILE_CLOSE,
                                    __FILE__, __LINE__);
 #endif
         if (merge_file->fd != -1) {
@@ -2362,7 +2325,7 @@
 
         trx->op_info = "renaming indexes";
 
-        pars_info_add_ull_literal(info, "tableid", table->id);
+        pars_info_add_dulint_literal(info, "tableid", table->id);
 
         err = que_eval_sql(info, rename_indexes, FALSE, trx);
 
@@ -2399,7 +2362,7 @@
 {
         ulint           err     = DB_ERROR;
         pars_info_t*    info;
-        char            old_name[MAX_TABLE_NAME_LEN + 1];
+        const char*     old_name= old_table->name;
 
         ut_ad(trx->mysql_thread_id == os_thread_get_curr_id());
         ut_ad(old_table != new_table);
@@ -2407,28 +2370,6 @@
 
         ut_a(trx->dict_operation_lock_mode == RW_X_LATCH);
 
-        /* store the old/current name to an automatic variable */
-        if (strlen(old_table->name) + 1 <= sizeof(old_name)) {
-                memcpy(old_name, old_table->name, strlen(old_table->name) + 1);
-        } else {
-                ut_print_timestamp(stderr);
-                fprintf(stderr, "InnoDB: too long table name: '%s', "
-                        "max length is %d\n", old_table->name,
-                        MAX_TABLE_NAME_LEN);
-                ut_error;
-        }
-
-        /* store the old/current name to an automatic variable */
-        if (strlen(old_table->name) + 1 <= sizeof(old_name)) {
-                memcpy(old_name, old_table->name, strlen(old_table->name) + 1);
-        } else {
-                ut_print_timestamp(stderr);
-                fprintf(stderr, "InnoDB: too long table name: '%s', "
-                        "max length is %d\n", old_table->name,
-                        MAX_TABLE_NAME_LEN);
-                ut_error;
-        }
-
         trx->op_info = "renaming tables";
 
         /* We use the private SQL parser of Innobase to generate the query
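
The removed side snapshots old_table->name into an automatic buffer before running the rename SQL, presumably because the dictionary object's name changes once the rename executes while old_name is still needed afterwards (it is passed to dict_load_foreigns() below); the added 5.1-era code keeps a plain pointer into the object. Note that the snapshot block appears twice in the removed source itself; the duplication is not an artifact of this page. A minimal sketch of the defensive copy, with a stand-in name and a guessed MAX_TABLE_NAME_LEN value:

    #include <stdio.h>
    #include <string.h>

    #define MAX_TABLE_NAME_LEN 192          /* stand-in value */

    int main(void)
    {
            const char*     table_name = "test/t1";  /* stand-in for old_table->name */
            char            old_name[MAX_TABLE_NAME_LEN + 1];

            /* Snapshot the name: after a successful rename the
            dictionary object would carry the new name, but callers
            still need the old one. */
            if (strlen(table_name) + 1 <= sizeof(old_name)) {
                    memcpy(old_name, table_name, strlen(table_name) + 1);
            } else {
                    fprintf(stderr, "too long table name: '%s'\n", table_name);
                    return 1;
            }

            printf("%s\n", old_name);
            return 0;
    }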
@@ -2464,7 +2405,7 @@
                 goto err_exit;
         }
 
-        err = dict_load_foreigns(old_name, FALSE, TRUE);
+        err = dict_load_foreigns(old_name, TRUE);
 
         if (err != DB_SUCCESS) {
 err_exit:
@@ -2504,7 +2445,7 @@
         node = ind_create_graph_create(index, heap);
         thr = pars_complete_graph_for_exec(node, trx, heap);
 
-        ut_a(thr == que_fork_start_command(static_cast<que_fork_t *>(que_node_get_parent(thr))));
+        ut_a(thr == que_fork_start_command(que_node_get_parent(thr)));
 
         que_run_threads(thr);
 
@@ -2561,7 +2502,8 @@
                 /* Note the id of the transaction that created this
                 index, we use it to restrict readers from accessing
                 this index, to ensure read consistency. */
-                index->trx_id = trx->id;
+                index->trx_id = (ib_uint64_t)
+                        ut_conv_dulint_to_longlong(trx->id);
         } else {
                 index = NULL;
         }
@@ -2578,8 +2520,10 @@
         const trx_t*            trx,    /*!< in: transaction */
         const dict_index_t*     index)  /*!< in: index to check */
 {
-        return(!trx->read_view
-               || read_view_sees_trx_id(trx->read_view, index->trx_id));
+        return(!trx->read_view || read_view_sees_trx_id(
+                       trx->read_view,
+                       ut_dulint_create((ulint) (index->trx_id >> 32),
+                                        (ulint) index->trx_id & 0xFFFFFFFF)));
 }
 
 /*********************************************************************//**
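
These hunks also step index->trx_id back from a native 64-bit id to the older dulint representation, a pair of 32-bit words, which is why the added code splits the id with ut_dulint_create((ulint) (index->trx_id >> 32), (ulint) index->trx_id & 0xFFFFFFFF). A standalone round-trip sketch with a stand-in dulint type:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for InnoDB's dulint: 64 bits kept as two 32-bit words. */
    typedef struct {
            uint32_t        high;
            uint32_t        low;
    } dulint;

    static dulint
    dulint_create(uint32_t high, uint32_t low)
    {
            dulint  d;
            d.high = high;
            d.low = low;
            return(d);
    }

    static uint64_t
    dulint_to_u64(dulint d)
    {
            return(((uint64_t) d.high << 32) | d.low);
    }

    int main(void)
    {
            uint64_t        id = 0x0123456789ABCDEFULL;
            dulint          d = dulint_create((uint32_t) (id >> 32),
                                              (uint32_t) (id & 0xFFFFFFFF));

            /* The same high/low split used in the hunk above round-trips. */
            printf("%d\n", dulint_to_u64(d) == id);  /* prints 1 */
            return 0;
    }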
@@ -2637,8 +2581,8 @@
         /* Allocate memory for merge file data structure and initialize
         fields */
 
-        merge_files = static_cast<merge_file_t *>(mem_alloc(n_indexes * sizeof *merge_files));
+        merge_files = mem_alloc(n_indexes * sizeof *merge_files);
         block_size = 3 * sizeof *block;
-        block = static_cast<row_merge_block_t *>(os_mem_alloc_large(&block_size));
+        block = os_mem_alloc_large(&block_size);
 
         for (i = 0; i < n_indexes; i++) {