~drizzle-trunk/drizzle/development

Viewing changes to plugin/innobase/lock/lock0lock.c

  • Committer: lbieber
  • Date: 2010-10-06 16:34:16 UTC
  • mfrom: (1816.1.3 build)
  • Revision ID: lbieber@orisndriz08-20101006163416-ea0sl59qgpglk21y
Merge Monty - Change the requirement from libinnodb to libhaildb. Also, tied it to version 2.2
Merge Andrew - fix bug 650935: remove --compress from all clients
Merge Andrew - fix bug 653471: Add -A to drizzle client
Merge Travis - bug 621861: change C structs to C++ classes in Drizzle
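
Two mechanical patterns account for most of the lock0lock.c churn shown below. The first is C++-compatibility casting, presumably tied to the bug 621861 work listed above: C accepts an implicit conversion from void* to a typed pointer (and from an integer to an enum), C++ does not, so the assignments from the untyped allocators and iterators used in this file (mem_alloc, mem_heap_alloc, mem_heap_dup, the HASH_GET_FIRST/HASH_GET_NEXT macros, ib_vector_get, ib_vector_pop) differ between the two sides of the diff only by an explicit static_cast<>. A minimal, self-contained sketch of that rule; everything prefixed demo_ is invented for illustration and is not Drizzle's real API:

#include <cstdlib>

/* Stand-in for the untyped allocators used in lock0lock.c; hypothetical
demo code, not InnoDB's real mem_alloc(). */
static void* demo_alloc(std::size_t size)
{
        return std::malloc(size);
}

/* Hypothetical simplified stand-in for lock_sys_t. */
struct demo_lock_sys {
        int     n_cells;
};

int main()
{
        /* Valid C, but rejected by a C++ compiler because there is no
        implicit conversion from void* to a typed pointer:

        demo_lock_sys*  lock_sys = demo_alloc(sizeof(demo_lock_sys));   */

        /* The form used wherever the file is built as C++: */
        demo_lock_sys*  lock_sys = static_cast<demo_lock_sys*>(
                demo_alloc(sizeof(demo_lock_sys)));

        lock_sys->n_cells = 0;
        std::free(lock_sys);
        return 0;
}

The enum casts in the diff, such as static_cast<lock_mode>(lock->type_mode & LOCK_MODE_MASK), follow from the same rule.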

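The second recurring pattern is the representation of transaction identifiers. One side of the diff carries them as InnoDB's historical dulint (a 64-bit value split into two machine words) and goes through helpers such as ut_dulint_cmp(), ut_dulint_is_zero(), ut_dulint_create() and TRX_ID_PREP_PRINTF(); the other side holds a native 64-bit integer (trx_id_t, ib_uint64_t) and compares or prints it directly, e.g. trx_id >= trx_sys->max_trx_id. A rough sketch of the difference; the demo_ types below are hypothetical stand-ins, not InnoDB's real definitions:

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-in for dulint: a 64-bit quantity kept as two words. */
struct demo_dulint {
        uint32_t        high;
        uint32_t        low;
};

/* Analogue of ut_dulint_cmp(): negative, zero or positive, high word first. */
static int demo_dulint_cmp(demo_dulint a, demo_dulint b)
{
        if (a.high != b.high) {
                return (a.high < b.high) ? -1 : 1;
        }
        if (a.low != b.low) {
                return (a.low < b.low) ? -1 : 1;
        }
        return 0;
}

int main()
{
        demo_dulint     trx_id = {0, 41};
        demo_dulint     max_trx_id = {0, 42};

        /* dulint style: if (ut_dulint_cmp(trx_id, max_trx_id) >= 0) ... */
        assert(demo_dulint_cmp(trx_id, max_trx_id) < 0);

        /* native 64-bit style: the comparison operator does the work. */
        uint64_t        trx_id64 = 41;
        uint64_t        max_trx_id64 = 42;
        assert(!(trx_id64 >= max_trx_id64));

        return 0;
}
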
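Less mechanical is the change in the deadlock checker visible around lock_deadlock_recursive() and lock_deadlock_occurs(): one side of the diff folds a search that runs out of budget into LOCK_VICTIM_IS_START, while the other reports it separately as LOCK_EXCEED_MAX_DEPTH and prints the "TOO DEEP OR LONG SEARCH" diagnostics at the top level. The underlying idea is a wait-for-graph walk that gives up once it has spent too many steps or recursed too deep. The following is a self-contained sketch of that idea only; the demo_ names are hypothetical and none of this is InnoDB's real lock_t/trx_t machinery:

#include <cstddef>
#include <vector>

/* Outcome codes mirroring the LOCK_VICTIM_IS_* / LOCK_EXCEED_MAX_DEPTH idea. */
enum demo_search_result {
        DEMO_NO_DEADLOCK = 0,
        DEMO_VICTIM_IS_START,
        DEMO_EXCEED_MAX_DEPTH
};

static const unsigned long     DEMO_MAX_STEPS = 1000000;
static const unsigned long     DEMO_MAX_DEPTH = 200;

/* waits_for[t] lists the transactions that transaction t is waiting on. */
static demo_search_result
demo_search(const std::vector<std::vector<int> >& waits_for,
            int start, int trx, unsigned long* cost, unsigned long depth)
{
        if (depth > 0 && trx == start) {
                return DEMO_VICTIM_IS_START;    /* cycle back to the start */
        }

        if (++*cost > DEMO_MAX_STEPS || depth > DEMO_MAX_DEPTH) {
                return DEMO_EXCEED_MAX_DEPTH;   /* give up instead of recursing on */
        }

        for (std::size_t i = 0; i < waits_for[trx].size(); i++) {
                demo_search_result      ret = demo_search(
                        waits_for, start, waits_for[trx][i], cost, depth + 1);

                if (ret != DEMO_NO_DEADLOCK) {
                        return ret;
                }
        }

        return DEMO_NO_DEADLOCK;
}

int main()
{
        /* Transaction 0 waits for 1 and 1 waits for 0: a two-member cycle. */
        std::vector<std::vector<int> > waits_for(2);
        waits_for[0].push_back(1);
        waits_for[1].push_back(0);

        unsigned long   cost = 0;

        return (demo_search(waits_for, 0, 0, &cost, 0)
                == DEMO_VICTIM_IS_START) ? 0 : 1;
}

In the diff itself the victim choice then depends on trx_weight comparisons (trx_weight_ge / trx_weight_cmp); the sketch stops at classifying the outcome. The diff of lock0lock.c follows.
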
1
1
/*****************************************************************************
2
2
 
3
 
Copyright (C) 1996, 2010, Innobase Oy. All Rights Reserved.
 
3
Copyright (c) 1996, 2009, Innobase Oy. All Rights Reserved.
4
4
 
5
5
This program is free software; you can redistribute it and/or modify it under
6
6
the terms of the GNU General Public License as published by the Free Software
361
361
/*===================*/
362
362
        ulint   space,  /*!< in: space id */
363
363
        ulint   page_no);/*!< in: page number */
 
364
 
 
365
/* Define the following in order to enable lock_rec_validate_page() checks. */
 
366
# undef UNIV_DEBUG_LOCK_VALIDATE
364
367
#endif /* UNIV_DEBUG */
365
368
 
366
369
/* The lock system */
374
377
/* Flags for recursive deadlock search */
375
378
#define LOCK_VICTIM_IS_START    1
376
379
#define LOCK_VICTIM_IS_OTHER    2
377
 
#define LOCK_EXCEED_MAX_DEPTH   3
378
380
 
379
381
/********************************************************************//**
380
382
Checks if a lock request results in a deadlock.
393
395
deadlock and we chose 'start' as the victim, LOCK_VICTIM_IS_OTHER if a
394
396
deadlock was found and we chose some other trx as a victim: we must do
395
397
the search again in this last case because there may be another
396
 
deadlock!
397
 
LOCK_EXCEED_MAX_DEPTH if the lock search exceeds max steps or max depth. */
 
398
deadlock! */
398
399
static
399
400
ulint
400
401
lock_deadlock_recursive(
401
402
/*====================*/
402
403
        trx_t*  start,          /*!< in: recursion starting point */
403
404
        trx_t*  trx,            /*!< in: a transaction waiting for a lock */
404
 
        lock_t* wait_lock,      /*!< in:  lock that is waiting to be granted */
 
405
        lock_t* wait_lock,      /*!< in: the lock trx is waiting to be granted */
405
406
        ulint*  cost,           /*!< in/out: number of calculation steps thus
406
407
                                far: if this exceeds LOCK_MAX_N_STEPS_...
407
 
                                we return LOCK_EXCEED_MAX_DEPTH */
 
408
                                we return LOCK_VICTIM_IS_START */
408
409
        ulint   depth);         /*!< in: recursion depth: if this exceeds
409
410
                                LOCK_MAX_DEPTH_IN_DEADLOCK_CHECK, we
410
 
                                return LOCK_EXCEED_MAX_DEPTH */
 
411
                                return LOCK_VICTIM_IS_START */
411
412
 
412
413
/*********************************************************************//**
413
414
Gets the nth bit of a record lock.
414
 
@return TRUE if bit set also if i == ULINT_UNDEFINED return FALSE*/
 
415
@return TRUE if bit set */
415
416
UNIV_INLINE
416
417
ibool
417
418
lock_rec_get_nth_bit(
466
467
        /* A sanity check: the trx_id in rec must be smaller than the global
467
468
        trx id counter */
468
469
 
469
 
        if (UNIV_UNLIKELY(trx_id >= trx_sys->max_trx_id)) {
 
470
        if (ut_dulint_cmp(trx_id, trx_sys->max_trx_id) >= 0) {
470
471
                ut_print_timestamp(stderr);
471
472
                fputs("  InnoDB: Error: transaction id associated"
472
473
                      " with record\n",
479
480
                        " global trx id counter " TRX_ID_FMT "!\n"
480
481
                        "InnoDB: The table is corrupt. You have to do"
481
482
                        " dump + drop + reimport.\n",
482
 
                        trx_id, trx_sys->max_trx_id);
 
483
                        TRX_ID_PREP_PRINTF(trx_id),
 
484
                        TRX_ID_PREP_PRINTF(trx_sys->max_trx_id));
483
485
 
484
486
                is_ok = FALSE;
485
487
        }
553
555
        }
554
556
 
555
557
        max_trx_id = page_get_max_trx_id(page_align(rec));
556
 
        ut_ad(max_trx_id);
 
558
        ut_ad(!ut_dulint_is_zero(max_trx_id));
557
559
 
558
 
        return(max_trx_id < view->up_limit_id);
 
560
        return(ut_dulint_cmp(max_trx_id, view->up_limit_id) < 0);
559
561
}
560
562
 
561
563
/*********************************************************************//**
566
568
/*============*/
567
569
        ulint   n_cells)        /*!< in: number of slots in lock hash table */
568
570
{
569
 
        lock_sys = static_cast<lock_sys_t *>(mem_alloc(sizeof(lock_sys_t)));
 
571
        lock_sys = mem_alloc(sizeof(lock_sys_t));
570
572
 
571
573
        lock_sys->rec_hash = hash_create(n_cells);
572
574
 
577
579
}
578
580
 
579
581
/*********************************************************************//**
580
 
Closes the lock system at database shutdown. */
581
 
UNIV_INTERN
582
 
void
583
 
lock_sys_close(void)
584
 
/*================*/
585
 
{
586
 
        if (lock_latest_err_file != NULL) {
587
 
                fclose(lock_latest_err_file);
588
 
                lock_latest_err_file = NULL;
589
 
        }
590
 
 
591
 
        hash_table_free(lock_sys->rec_hash);
592
 
        mem_free(lock_sys);
593
 
        lock_sys = NULL;
594
 
}
595
 
 
596
 
/*********************************************************************//**
597
582
Gets the size of a lock struct.
598
583
@return size in bytes */
599
584
UNIV_INTERN
615
600
{
616
601
        ut_ad(lock);
617
602
 
618
 
        return static_cast<lock_mode>(lock->type_mode & LOCK_MODE_MASK);
 
603
        return(lock->type_mode & LOCK_MODE_MASK);
619
604
}
620
605
 
621
606
/*********************************************************************//**
918
903
        ut_ad(lock_get_type_low(lock2) == LOCK_REC);
919
904
 
920
905
        if (trx != lock2->trx
921
 
            && !lock_mode_compatible(static_cast<lock_mode>(LOCK_MODE_MASK & type_mode),
 
906
            && !lock_mode_compatible(LOCK_MODE_MASK & type_mode,
922
907
                                     lock_get_mode(lock2))) {
923
908
 
924
909
                /* We have somewhat complex rules when gap type record locks
1113
1098
        page_no = lock->un_member.rec_lock.page_no;
1114
1099
 
1115
1100
        for (;;) {
1116
 
                lock = static_cast<lock_t *>(HASH_GET_NEXT(hash, lock));
 
1101
                lock = HASH_GET_NEXT(hash, lock);
1117
1102
 
1118
1103
                if (!lock) {
1119
1104
 
1145
1130
 
1146
1131
        ut_ad(mutex_own(&kernel_mutex));
1147
1132
 
1148
 
        lock = static_cast<lock_t *>(HASH_GET_FIRST(lock_sys->rec_hash,
1149
 
                              lock_rec_hash(space, page_no)));
 
1133
        lock = HASH_GET_FIRST(lock_sys->rec_hash,
 
1134
                              lock_rec_hash(space, page_no));
1150
1135
        while (lock) {
1151
1136
                if ((lock->un_member.rec_lock.space == space)
1152
1137
                    && (lock->un_member.rec_lock.page_no == page_no)) {
1154
1139
                        break;
1155
1140
                }
1156
1141
 
1157
 
                lock = static_cast<lock_t *>(HASH_GET_NEXT(hash, lock));
 
1142
                lock = HASH_GET_NEXT(hash, lock);
1158
1143
        }
1159
1144
 
1160
1145
        return(lock);
1204
1189
 
1205
1190
        hash = buf_block_get_lock_hash_val(block);
1206
1191
 
1207
 
        lock = static_cast<lock_t *>(HASH_GET_FIRST(lock_sys->rec_hash, hash));
 
1192
        lock = HASH_GET_FIRST(lock_sys->rec_hash, hash);
1208
1193
 
1209
1194
        while (lock) {
1210
1195
                if ((lock->un_member.rec_lock.space == space)
1213
1198
                        break;
1214
1199
                }
1215
1200
 
1216
 
                lock = static_cast<lock_t *>(HASH_GET_NEXT(hash, lock));
 
1201
                lock = HASH_GET_NEXT(hash, lock);
1217
1202
        }
1218
1203
 
1219
1204
        return(lock);
1221
1206
 
1222
1207
/*********************************************************************//**
1223
1208
Gets the next explicit lock request on a record.
1224
 
@return next lock, NULL if none exists or if heap_no == ULINT_UNDEFINED */
 
1209
@return next lock, NULL if none exists */
1225
1210
UNIV_INLINE
1226
1211
lock_t*
1227
1212
lock_rec_get_next(
1303
1288
 
1304
1289
        size = sizeof(lock_t) + lock_rec_get_n_bits(lock) / 8;
1305
1290
 
1306
 
        return static_cast<lock_t *>(mem_heap_dup(heap, lock, size));
 
1291
        return(mem_heap_dup(heap, lock, size));
1307
1292
}
1308
1293
 
1309
1294
/*********************************************************************//**
1418
1403
        while (lock) {
1419
1404
                if (lock->trx == trx
1420
1405
                    && lock_mode_stronger_or_eq(lock_get_mode(lock),
1421
 
                                                static_cast<lock_mode>(precise_mode & LOCK_MODE_MASK))
 
1406
                                                precise_mode & LOCK_MODE_MASK)
1422
1407
                    && !lock_get_wait(lock)
1423
1408
                    && (!lock_rec_get_rec_not_gap(lock)
1424
1409
                        || (precise_mode & LOCK_REC_NOT_GAP)
1591
1576
        max trx id to the log, and therefore during recovery, this value
1592
1577
        for a page may be incorrect. */
1593
1578
 
1594
 
        if (page_get_max_trx_id(page) < trx_list_get_min_trx_id()
 
1579
        if (!(ut_dulint_cmp(page_get_max_trx_id(page),
 
1580
                            trx_list_get_min_trx_id()) >= 0)
1595
1581
            && !recv_recovery_is_on()) {
1596
1582
 
1597
1583
                return(NULL);
1620
1606
ulint
1621
1607
lock_number_of_rows_locked(
1622
1608
/*=======================*/
1623
 
        const trx_t*    trx)    /*!< in: transaction */
 
1609
        trx_t*  trx)    /*!< in: transaction */
1624
1610
{
1625
1611
        lock_t* lock;
1626
1612
        ulint   n_records = 0;
1694
1680
        n_bits = page_dir_get_n_heap(page) + LOCK_PAGE_BITMAP_MARGIN;
1695
1681
        n_bytes = 1 + n_bits / 8;
1696
1682
 
1697
 
        lock = static_cast<lock_t *>(mem_heap_alloc(trx->lock_heap, sizeof(lock_t) + n_bytes));
 
1683
        lock = mem_heap_alloc(trx->lock_heap, sizeof(lock_t) + n_bytes);
1698
1684
 
1699
1685
        UT_LIST_ADD_LAST(trx_locks, trx->trx_locks, lock);
1700
1686
 
1729
1715
Enqueues a waiting request for a lock which cannot be granted immediately.
1730
1716
Checks for deadlocks.
1731
1717
@return DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED, or
1732
 
DB_SUCCESS_LOCKED_REC; DB_SUCCESS_LOCKED_REC means that
1733
 
there was a deadlock, but another transaction was chosen as a victim,
1734
 
and we got the lock immediately: no need to wait then */
 
1718
DB_SUCCESS; DB_SUCCESS means that there was a deadlock, but another
 
1719
transaction was chosen as a victim, and we got the lock immediately:
 
1720
no need to wait then */
1735
1721
static
1736
 
enum db_err
 
1722
ulint
1737
1723
lock_rec_enqueue_waiting(
1738
1724
/*=====================*/
1739
1725
        ulint                   type_mode,/*!< in: lock mode this
1805
1791
 
1806
1792
        if (trx->wait_lock == NULL) {
1807
1793
 
1808
 
                return(DB_SUCCESS_LOCKED_REC);
 
1794
                return(DB_SUCCESS);
1809
1795
        }
1810
1796
 
1811
1797
        trx->que_state = TRX_QUE_LOCK_WAIT;
1816
1802
 
1817
1803
#ifdef UNIV_DEBUG
1818
1804
        if (lock_print_waits) {
1819
 
                fprintf(stderr, "Lock wait for trx " TRX_ID_FMT " in index ",
1820
 
                        trx->id);
 
1805
                fprintf(stderr, "Lock wait for trx %lu in index ",
 
1806
                        (ulong) ut_dulint_get_low(trx->id));
1821
1807
                ut_print_name(stderr, trx, FALSE, index->name);
1822
1808
        }
1823
1809
#endif /* UNIV_DEBUG */
1921
1907
        return(lock_rec_create(type_mode, block, heap_no, index, trx));
1922
1908
}
1923
1909
 
1924
 
/** Record locking request status */
1925
 
enum lock_rec_req_status {
1926
 
        /** Failed to acquire a lock */
1927
 
        LOCK_REC_FAIL,
1928
 
        /** Succeeded in acquiring a lock (implicit or already acquired) */
1929
 
        LOCK_REC_SUCCESS,
1930
 
        /** Explicitly created a new lock */
1931
 
        LOCK_REC_SUCCESS_CREATED
1932
 
};
1933
 
 
1934
1910
/*********************************************************************//**
1935
1911
This is a fast routine for locking a record in the most common cases:
1936
1912
there are no explicit locks on the page, or there is just one lock, owned
1938
1914
which does NOT look at implicit locks! Checks lock compatibility within
1939
1915
explicit locks. This function sets a normal next-key lock, or in the case of
1940
1916
a page supremum record, a gap type lock.
1941
 
@return whether the locking succeeded */
 
1917
@return TRUE if locking succeeded */
1942
1918
UNIV_INLINE
1943
 
enum lock_rec_req_status
 
1919
ibool
1944
1920
lock_rec_lock_fast(
1945
1921
/*===============*/
1946
1922
        ibool                   impl,   /*!< in: if TRUE, no lock is set
1979
1955
                        lock_rec_create(mode, block, heap_no, index, trx);
1980
1956
                }
1981
1957
 
1982
 
                return(LOCK_REC_SUCCESS_CREATED);
 
1958
                return(TRUE);
1983
1959
        }
1984
1960
 
1985
1961
        if (lock_rec_get_next_on_page(lock)) {
1986
1962
 
1987
 
                return(LOCK_REC_FAIL);
 
1963
                return(FALSE);
1988
1964
        }
1989
1965
 
1990
1966
        if (lock->trx != trx
1991
1967
            || lock->type_mode != (mode | LOCK_REC)
1992
1968
            || lock_rec_get_n_bits(lock) <= heap_no) {
1993
1969
 
1994
 
                return(LOCK_REC_FAIL);
 
1970
                return(FALSE);
1995
1971
        }
1996
1972
 
1997
1973
        if (!impl) {
2000
1976
 
2001
1977
                if (!lock_rec_get_nth_bit(lock, heap_no)) {
2002
1978
                        lock_rec_set_nth_bit(lock, heap_no);
2003
 
                        return(LOCK_REC_SUCCESS_CREATED);
2004
1979
                }
2005
1980
        }
2006
1981
 
2007
 
        return(LOCK_REC_SUCCESS);
 
1982
        return(TRUE);
2008
1983
}
2009
1984
 
2010
1985
/*********************************************************************//**
2012
1987
low-level function which does NOT look at implicit locks! Checks lock
2013
1988
compatibility within explicit locks. This function sets a normal next-key
2014
1989
lock, or in the case of a page supremum record, a gap type lock.
2015
 
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK,
2016
 
or DB_QUE_THR_SUSPENDED */
 
1990
@return DB_SUCCESS, DB_LOCK_WAIT, or error code */
2017
1991
static
2018
 
enum db_err
 
1992
ulint
2019
1993
lock_rec_lock_slow(
2020
1994
/*===============*/
2021
1995
        ibool                   impl,   /*!< in: if TRUE, no lock is set
2032
2006
        que_thr_t*              thr)    /*!< in: query thread */
2033
2007
{
2034
2008
        trx_t*  trx;
 
2009
        ulint   err;
2035
2010
 
2036
2011
        ut_ad(mutex_own(&kernel_mutex));
2037
2012
        ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
2050
2025
                /* The trx already has a strong enough lock on rec: do
2051
2026
                nothing */
2052
2027
 
2053
 
        } else if (lock_rec_other_has_conflicting(static_cast<lock_mode>(mode), block, heap_no, trx)) {
 
2028
                err = DB_SUCCESS;
 
2029
        } else if (lock_rec_other_has_conflicting(mode, block, heap_no, trx)) {
2054
2030
 
2055
2031
                /* If another transaction has a non-gap conflicting request in
2056
2032
                the queue, as this transaction does not have a lock strong
2057
2033
                enough already granted on the record, we have to wait. */
2058
2034
 
2059
 
                return(lock_rec_enqueue_waiting(mode, block, heap_no,
2060
 
                                                index, thr));
2061
 
        } else if (!impl) {
2062
 
                /* Set the requested lock on the record */
2063
 
 
2064
 
                lock_rec_add_to_queue(LOCK_REC | mode, block,
2065
 
                                      heap_no, index, trx);
2066
 
                return(DB_SUCCESS_LOCKED_REC);
 
2035
                err = lock_rec_enqueue_waiting(mode, block, heap_no,
 
2036
                                               index, thr);
 
2037
        } else {
 
2038
                if (!impl) {
 
2039
                        /* Set the requested lock on the record */
 
2040
 
 
2041
                        lock_rec_add_to_queue(LOCK_REC | mode, block,
 
2042
                                              heap_no, index, trx);
 
2043
                }
 
2044
 
 
2045
                err = DB_SUCCESS;
2067
2046
        }
2068
2047
 
2069
 
        return(DB_SUCCESS);
 
2048
        return(err);
2070
2049
}
2071
2050
 
2072
2051
/*********************************************************************//**
2075
2054
which does NOT look at implicit locks! Checks lock compatibility within
2076
2055
explicit locks. This function sets a normal next-key lock, or in the case
2077
2056
of a page supremum record, a gap type lock.
2078
 
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK,
2079
 
or DB_QUE_THR_SUSPENDED */
 
2057
@return DB_SUCCESS, DB_LOCK_WAIT, or error code */
2080
2058
static
2081
 
enum db_err
 
2059
ulint
2082
2060
lock_rec_lock(
2083
2061
/*==========*/
2084
2062
        ibool                   impl,   /*!< in: if TRUE, no lock is set
2094
2072
        dict_index_t*           index,  /*!< in: index of record */
2095
2073
        que_thr_t*              thr)    /*!< in: query thread */
2096
2074
{
 
2075
        ulint   err;
 
2076
 
2097
2077
        ut_ad(mutex_own(&kernel_mutex));
2098
2078
        ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
2099
2079
              || lock_table_has(thr_get_trx(thr), index->table, LOCK_IS));
2105
2085
              || mode - (LOCK_MODE_MASK & mode) == LOCK_REC_NOT_GAP
2106
2086
              || mode - (LOCK_MODE_MASK & mode) == 0);
2107
2087
 
2108
 
        /* We try a simplified and faster subroutine for the most
2109
 
        common cases */
2110
 
        switch (lock_rec_lock_fast(impl, mode, block, heap_no, index, thr)) {
2111
 
        case LOCK_REC_SUCCESS:
2112
 
                return(DB_SUCCESS);
2113
 
        case LOCK_REC_SUCCESS_CREATED:
2114
 
                return(DB_SUCCESS_LOCKED_REC);
2115
 
        case LOCK_REC_FAIL:
2116
 
                return(lock_rec_lock_slow(impl, mode, block,
2117
 
                                          heap_no, index, thr));
 
2088
        if (lock_rec_lock_fast(impl, mode, block, heap_no, index, thr)) {
 
2089
 
 
2090
                /* We try a simplified and faster subroutine for the most
 
2091
                common cases */
 
2092
 
 
2093
                err = DB_SUCCESS;
 
2094
        } else {
 
2095
                err = lock_rec_lock_slow(impl, mode, block,
 
2096
                                         heap_no, index, thr);
2118
2097
        }
2119
2098
 
2120
 
        ut_error;
2121
 
        return(DB_ERROR);
 
2099
        return(err);
2122
2100
}
2123
2101
 
2124
2102
/*********************************************************************//**
2189
2167
 
2190
2168
#ifdef UNIV_DEBUG
2191
2169
        if (lock_print_waits) {
2192
 
                fprintf(stderr, "Lock wait for trx " TRX_ID_FMT " ends\n",
2193
 
                        lock->trx->id);
 
2170
                fprintf(stderr, "Lock wait for trx %lu ends\n",
 
2171
                        (ulong) ut_dulint_get_low(lock->trx->id));
2194
2172
        }
2195
2173
#endif /* UNIV_DEBUG */
2196
2174
 
2404
2382
                if (!lock_rec_get_insert_intention(lock)
2405
2383
                    && !((srv_locks_unsafe_for_binlog
2406
2384
                          || lock->trx->isolation_level
2407
 
                          <= TRX_ISO_READ_COMMITTED)
 
2385
                          == TRX_ISO_READ_COMMITTED)
2408
2386
                         && lock_get_mode(lock) == LOCK_X)) {
2409
2387
 
2410
2388
                        lock_rec_add_to_queue(LOCK_REC | LOCK_GAP
3263
3241
        lock_t* lock,   /*!< in: lock the transaction is requesting */
3264
3242
        trx_t*  trx)    /*!< in: transaction */
3265
3243
{
 
3244
        dict_table_t*   table;
 
3245
        dict_index_t*   index;
3266
3246
        trx_t*          mark_trx;
3267
3247
        ulint           ret;
3268
3248
        ulint           cost    = 0;
3284
3264
 
3285
3265
        ret = lock_deadlock_recursive(trx, trx, lock, &cost, 0);
3286
3266
 
3287
 
        switch (ret) {
3288
 
        case LOCK_VICTIM_IS_OTHER:
 
3267
        if (ret == LOCK_VICTIM_IS_OTHER) {
3289
3268
                /* We chose some other trx as a victim: retry if there still
3290
3269
                is a deadlock */
 
3270
 
3291
3271
                goto retry;
3292
 
 
3293
 
        case LOCK_EXCEED_MAX_DEPTH:
3294
 
                /* If the lock search exceeds the max step
3295
 
                or the max depth, the current trx will be
3296
 
                the victim. Print its information. */
3297
 
                rewind(lock_latest_err_file);
3298
 
                ut_print_timestamp(lock_latest_err_file);
3299
 
 
3300
 
                fputs("TOO DEEP OR LONG SEARCH IN THE LOCK TABLE"
3301
 
                      " WAITS-FOR GRAPH, WE WILL ROLL BACK"
3302
 
                      " FOLLOWING TRANSACTION \n",
3303
 
                      lock_latest_err_file);
3304
 
 
3305
 
                fputs("\n*** TRANSACTION:\n", lock_latest_err_file);
3306
 
                      trx_print(lock_latest_err_file, trx, 3000);
3307
 
 
3308
 
                fputs("*** WAITING FOR THIS LOCK TO BE GRANTED:\n",
3309
 
                      lock_latest_err_file);
3310
 
 
3311
 
                if (lock_get_type(lock) == LOCK_REC) {
3312
 
                        lock_rec_print(lock_latest_err_file, lock);
 
3272
        }
 
3273
 
 
3274
        if (UNIV_UNLIKELY(ret == LOCK_VICTIM_IS_START)) {
 
3275
                if (lock_get_type_low(lock) & LOCK_TABLE) {
 
3276
                        table = lock->un_member.tab_lock.table;
 
3277
                        index = NULL;
3313
3278
                } else {
3314
 
                        lock_table_print(lock_latest_err_file, lock);
 
3279
                        index = lock->index;
 
3280
                        table = index->table;
3315
3281
                }
3316
 
                break;
3317
 
 
3318
 
        case LOCK_VICTIM_IS_START:
 
3282
 
 
3283
                lock_deadlock_found = TRUE;
 
3284
 
3319
3285
                fputs("*** WE ROLL BACK TRANSACTION (2)\n",
3320
3286
                      lock_latest_err_file);
3321
 
                break;
3322
3287
 
3323
 
        default:
3324
 
                /* No deadlock detected*/
3325
 
                return(FALSE);
 
3288
                return(TRUE);
3326
3289
        }
3327
3290
 
3328
 
        lock_deadlock_found = TRUE;
3329
 
 
3330
 
        return(TRUE);
 
3291
        return(FALSE);
3331
3292
}
3332
3293
 
3333
3294
/********************************************************************//**
3336
3297
deadlock and we chose 'start' as the victim, LOCK_VICTIM_IS_OTHER if a
3337
3298
deadlock was found and we chose some other trx as a victim: we must do
3338
3299
the search again in this last case because there may be another
3339
 
deadlock!
3340
 
LOCK_EXCEED_MAX_DEPTH if the lock search exceeds max steps or max depth. */
 
3300
deadlock! */
3341
3301
static
3342
3302
ulint
3343
3303
lock_deadlock_recursive(
3344
3304
/*====================*/
3345
3305
        trx_t*  start,          /*!< in: recursion starting point */
3346
3306
        trx_t*  trx,            /*!< in: a transaction waiting for a lock */
3347
 
        lock_t* wait_lock,      /*!< in: lock that is waiting to be granted */
 
3307
        lock_t* wait_lock,      /*!< in: the lock trx is waiting to be granted */
3348
3308
        ulint*  cost,           /*!< in/out: number of calculation steps thus
3349
3309
                                far: if this exceeds LOCK_MAX_N_STEPS_...
3350
 
                                we return LOCK_EXCEED_MAX_DEPTH */
 
3310
                                we return LOCK_VICTIM_IS_START */
3351
3311
        ulint   depth)          /*!< in: recursion depth: if this exceeds
3352
3312
                                LOCK_MAX_DEPTH_IN_DEADLOCK_CHECK, we
3353
 
                                return LOCK_EXCEED_MAX_DEPTH */
 
3313
                                return LOCK_VICTIM_IS_START */
3354
3314
{
 
3315
        lock_t* lock;
 
3316
        ulint   bit_no          = ULINT_UNDEFINED;
 
3317
        trx_t*  lock_trx;
3355
3318
        ulint   ret;
3356
 
        lock_t* lock;
3357
 
        trx_t*  lock_trx;
3358
 
        ulint   heap_no         = ULINT_UNDEFINED;
3359
3319
 
3360
3320
        ut_a(trx);
3361
3321
        ut_a(start);
3371
3331
 
3372
3332
        *cost = *cost + 1;
3373
3333
 
 
3334
        lock = wait_lock;
 
3335
 
3374
3336
        if (lock_get_type_low(wait_lock) == LOCK_REC) {
3375
 
                ulint           space;
3376
 
                ulint           page_no;
3377
 
 
3378
 
                heap_no = lock_rec_find_set_bit(wait_lock);
3379
 
                ut_a(heap_no != ULINT_UNDEFINED);
3380
 
 
3381
 
                space = wait_lock->un_member.rec_lock.space;
3382
 
                page_no = wait_lock->un_member.rec_lock.page_no;
3383
 
 
3384
 
                lock = lock_rec_get_first_on_page_addr(space, page_no);
3385
 
 
3386
 
                /* Position the iterator on the first matching record lock. */
3387
 
                while (lock != NULL
3388
 
                       && lock != wait_lock
3389
 
                       && !lock_rec_get_nth_bit(lock, heap_no)) {
3390
 
 
3391
 
                        lock = lock_rec_get_next_on_page(lock);
3392
 
                }
3393
 
 
3394
 
                if (lock == wait_lock) {
3395
 
                        lock = NULL;
3396
 
                }
3397
 
 
3398
 
                ut_ad(lock == NULL || lock_rec_get_nth_bit(lock, heap_no));
3399
 
 
3400
 
        } else {
3401
 
                lock = wait_lock;
 
3337
 
 
3338
                bit_no = lock_rec_find_set_bit(wait_lock);
 
3339
 
 
3340
                ut_a(bit_no != ULINT_UNDEFINED);
3402
3341
        }
3403
3342
 
3404
3343
        /* Look at the locks ahead of wait_lock in the lock queue */
3405
3344
 
3406
3345
        for (;;) {
3407
 
                /* Get previous table lock. */
3408
 
                if (heap_no == ULINT_UNDEFINED) {
3409
 
 
3410
 
                        lock = UT_LIST_GET_PREV(
3411
 
                                un_member.tab_lock.locks, lock);
 
3346
                if (lock_get_type_low(lock) & LOCK_TABLE) {
 
3347
 
 
3348
                        lock = UT_LIST_GET_PREV(un_member.tab_lock.locks,
 
3349
                                                lock);
 
3350
                } else {
 
3351
                        ut_ad(lock_get_type_low(lock) == LOCK_REC);
 
3352
                        ut_a(bit_no != ULINT_UNDEFINED);
 
3353
 
 
3354
                        lock = (lock_t*) lock_rec_get_prev(lock, bit_no);
3412
3355
                }
3413
3356
 
3414
3357
                if (lock == NULL) {
3426
3369
 
3427
3370
                        lock_trx = lock->trx;
3428
3371
 
3429
 
                        if (lock_trx == start) {
 
3372
                        if (lock_trx == start || too_far) {
3430
3373
 
3431
3374
                                /* We came back to the recursion starting
3432
3375
                                point: a deadlock detected; or we have
3473
3416
                                }
3474
3417
#ifdef UNIV_DEBUG
3475
3418
                                if (lock_print_waits) {
3476
 
                                        fputs("Deadlock detected\n",
 
3419
                                        fputs("Deadlock detected"
 
3420
                                              " or too long search\n",
3477
3421
                                              stderr);
3478
3422
                                }
3479
3423
#endif /* UNIV_DEBUG */
3480
 
 
3481
 
                                if (trx_weight_ge(wait_lock->trx, start)) {
 
3424
                                if (too_far) {
 
3425
 
 
3426
                                        fputs("TOO DEEP OR LONG SEARCH"
 
3427
                                              " IN THE LOCK TABLE"
 
3428
                                              " WAITS-FOR GRAPH\n", ef);
 
3429
 
 
3430
                                        return(LOCK_VICTIM_IS_START);
 
3431
                                }
 
3432
 
 
3433
                                if (trx_weight_cmp(wait_lock->trx,
 
3434
                                                   start) >= 0) {
3482
3435
                                        /* Our recursion starting point
3483
3436
                                        transaction is 'smaller', let us
3484
3437
                                        choose 'start' as the victim and roll
3511
3464
                                return(LOCK_VICTIM_IS_OTHER);
3512
3465
                        }
3513
3466
 
3514
 
                        if (too_far) {
3515
 
 
3516
 
#ifdef UNIV_DEBUG
3517
 
                                if (lock_print_waits) {
3518
 
                                        fputs("Deadlock search exceeds"
3519
 
                                              " max steps or depth.\n",
3520
 
                                              stderr);
3521
 
                                }
3522
 
#endif /* UNIV_DEBUG */
3523
 
                                /* The information about transaction/lock
3524
 
                                to be rolled back is available in the top
3525
 
                                level. Do not print anything here. */
3526
 
                                return(LOCK_EXCEED_MAX_DEPTH);
3527
 
                        }
3528
 
 
3529
3467
                        if (lock_trx->que_state == TRX_QUE_LOCK_WAIT) {
3530
3468
 
3531
3469
                                /* Another trx ahead has requested lock in an
3535
3473
                                ret = lock_deadlock_recursive(
3536
3474
                                        start, lock_trx,
3537
3475
                                        lock_trx->wait_lock, cost, depth + 1);
3538
 
 
3539
3476
                                if (ret != 0) {
3540
3477
 
3541
3478
                                        return(ret);
3542
3479
                                }
3543
3480
                        }
3544
3481
                }
3545
 
                /* Get the next record lock to check. */
3546
 
                if (heap_no != ULINT_UNDEFINED) {
3547
 
 
3548
 
                        ut_a(lock != NULL);
3549
 
 
3550
 
                        do {
3551
 
                                lock = lock_rec_get_next_on_page(lock);
3552
 
                        } while (lock != NULL
3553
 
                                && lock != wait_lock
3554
 
                                && !lock_rec_get_nth_bit(lock, heap_no));
3555
 
 
3556
 
                        if (lock == wait_lock) {
3557
 
                                lock = NULL;
3558
 
                        }
3559
 
                }
3560
3482
        }/* end of the 'for (;;)'-loop */
3561
3483
}
3562
3484
 
3595
3517
 
3596
3518
                ib_vector_push(trx->autoinc_locks, lock);
3597
3519
        } else {
3598
 
                lock = static_cast<lock_t *>(mem_heap_alloc(trx->lock_heap, sizeof(lock_t)));
 
3520
                lock = mem_heap_alloc(trx->lock_heap, sizeof(lock_t));
3599
3521
        }
3600
3522
 
3601
3523
        UT_LIST_ADD_LAST(trx_locks, trx->trx_locks, lock);
3652
3574
                and lock_grant()). Therefore it can be empty and we
3653
3575
                need to check for that. */
3654
3576
 
3655
 
                if (!lock_get_wait(lock)
3656
 
                    && !ib_vector_is_empty(trx->autoinc_locks)) {
 
3577
                if (!ib_vector_is_empty(trx->autoinc_locks)) {
3657
3578
                        lock_t* autoinc_lock;
3658
3579
 
3659
 
                        autoinc_lock = static_cast<lock_t *>(ib_vector_pop(trx->autoinc_locks));
 
3580
                        autoinc_lock = ib_vector_pop(trx->autoinc_locks);
3660
3581
                        ut_a(autoinc_lock == lock);
3661
3582
                }
3662
3583
 
3726
3647
 
3727
3648
        if (lock_deadlock_occurs(lock, trx)) {
3728
3649
 
3729
 
                /* The order here is important, we don't want to
3730
 
                lose the state of the lock before calling remove. */
 
3650
                lock_reset_lock_and_trx_wait(lock);
3731
3651
                lock_table_remove_low(lock);
3732
 
                lock_reset_lock_and_trx_wait(lock);
3733
3652
 
3734
3653
                return(DB_DEADLOCK);
3735
3654
        }
3738
3657
                /* Deadlock resolution chose another transaction as a victim,
3739
3658
                and we accidentally got our lock granted! */
3740
3659
 
3741
 
                return(DB_SUCCESS_LOCKED_REC);
 
3660
                return(DB_SUCCESS);
3742
3661
        }
3743
3662
 
3744
3663
        trx->que_state = TRX_QUE_LOCK_WAIT;
3752
3671
 
3753
3672
/*********************************************************************//**
3754
3673
Checks if other transactions have an incompatible mode lock request in
3755
 
the lock queue.
3756
 
@return lock or NULL */
 
3674
the lock queue. */
3757
3675
UNIV_INLINE
3758
 
lock_t*
 
3676
ibool
3759
3677
lock_table_other_has_incompatible(
3760
3678
/*==============================*/
3761
3679
        trx_t*          trx,    /*!< in: transaction, or NULL if all
3777
3695
                    && (!lock_mode_compatible(lock_get_mode(lock), mode))
3778
3696
                    && (wait || !(lock_get_wait(lock)))) {
3779
3697
 
3780
 
                        return(lock);
 
3698
                        return(TRUE);
3781
3699
                }
3782
3700
 
3783
3701
                lock = UT_LIST_GET_PREV(un_member.tab_lock.locks, lock);
3784
3702
        }
3785
3703
 
3786
 
        return(NULL);
 
3704
        return(FALSE);
3787
3705
}
3788
3706
 
3789
3707
/*********************************************************************//**
3807
3725
 
3808
3726
        if (flags & BTR_NO_LOCKING_FLAG) {
3809
3727
 
3810
 
                return(DB_SUCCESS_LOCKED_REC);
 
3728
                return(DB_SUCCESS);
3811
3729
        }
3812
3730
 
3813
3731
        ut_a(flags == 0);
3934
3852
        const rec_t*            rec,    /*!< in: record */
3935
3853
        enum lock_mode          lock_mode)/*!< in: LOCK_S or LOCK_X */
3936
3854
{
3937
 
        lock_t* first_lock;
3938
3855
        lock_t* lock;
 
3856
        lock_t* release_lock    = NULL;
3939
3857
        ulint   heap_no;
3940
3858
 
3941
3859
        ut_ad(trx && rec);
3945
3863
 
3946
3864
        mutex_enter(&kernel_mutex);
3947
3865
 
3948
 
        first_lock = lock_rec_get_first(block, heap_no);
 
3866
        lock = lock_rec_get_first(block, heap_no);
3949
3867
 
3950
3868
        /* Find the last lock with the same lock_mode and transaction
3951
3869
        from the record. */
3952
3870
 
3953
 
        for (lock = first_lock; lock != NULL;
3954
 
             lock = lock_rec_get_next(heap_no, lock)) {
 
3871
        while (lock != NULL) {
3955
3872
                if (lock->trx == trx && lock_get_mode(lock) == lock_mode) {
 
3873
                        release_lock = lock;
3956
3874
                        ut_a(!lock_get_wait(lock));
3957
 
                        lock_rec_reset_nth_bit(lock, heap_no);
3958
 
                        goto released;
3959
3875
                }
3960
 
        }
3961
 
 
3962
 
        mutex_exit(&kernel_mutex);
3963
 
        ut_print_timestamp(stderr);
3964
 
        fprintf(stderr,
3965
 
                "  InnoDB: Error: unlock row could not"
3966
 
                " find a %lu mode lock on the record\n",
3967
 
                (ulong) lock_mode);
3968
 
 
3969
 
        return;
3970
 
 
3971
 
released:
 
3876
 
 
3877
                lock = lock_rec_get_next(heap_no, lock);
 
3878
        }
 
3879
 
 
3880
        /* If a record lock is found, release the record lock */
 
3881
 
 
3882
        if (UNIV_LIKELY(release_lock != NULL)) {
 
3883
                lock_rec_reset_nth_bit(release_lock, heap_no);
 
3884
        } else {
 
3885
                mutex_exit(&kernel_mutex);
 
3886
                ut_print_timestamp(stderr);
 
3887
                fprintf(stderr,
 
3888
                        "  InnoDB: Error: unlock row could not"
 
3889
                        " find a %lu mode lock on the record\n",
 
3890
                        (ulong) lock_mode);
 
3891
 
 
3892
                return;
 
3893
        }
 
3894
 
3972
3895
        /* Check if we can now grant waiting lock requests */
3973
3896
 
3974
 
        for (lock = first_lock; lock != NULL;
3975
 
             lock = lock_rec_get_next(heap_no, lock)) {
 
3897
        lock = lock_rec_get_first(block, heap_no);
 
3898
 
 
3899
        while (lock != NULL) {
3976
3900
                if (lock_get_wait(lock)
3977
3901
                    && !lock_rec_has_to_wait_in_queue(lock)) {
3978
3902
 
3979
3903
                        /* Grant the lock */
3980
3904
                        lock_grant(lock);
3981
3905
                }
 
3906
 
 
3907
                lock = lock_rec_get_next(heap_no, lock);
3982
3908
        }
3983
3909
 
3984
3910
        mutex_exit(&kernel_mutex);
4014
3940
                        ut_ad(lock_get_type_low(lock) & LOCK_TABLE);
4015
3941
 
4016
3942
                        if (lock_get_mode(lock) != LOCK_IS
4017
 
                            && trx->undo_no != 0) {
 
3943
                            && !ut_dulint_is_zero(trx->undo_no)) {
4018
3944
 
4019
3945
                                /* The trx may have modified the table. We
4020
3946
                                block the use of the MySQL query cache for
4213
4139
        fputs("TABLE LOCK table ", file);
4214
4140
        ut_print_name(file, lock->trx, TRUE,
4215
4141
                      lock->un_member.tab_lock.table->name);
4216
 
        fprintf(file, " trx id " TRX_ID_FMT, lock->trx->id);
 
4142
        fprintf(file, " trx id " TRX_ID_FMT,
 
4143
                TRX_ID_PREP_PRINTF(lock->trx->id));
4217
4144
 
4218
4145
        if (lock_get_mode(lock) == LOCK_S) {
4219
4146
                fputs(" lock mode S", file);
4266
4193
                (ulong) space, (ulong) page_no,
4267
4194
                (ulong) lock_rec_get_n_bits(lock));
4268
4195
        dict_index_name_print(file, lock->trx, lock->index);
4269
 
        fprintf(file, " trx id " TRX_ID_FMT, lock->trx->id);
 
4196
        fprintf(file, " trx id " TRX_ID_FMT,
 
4197
                TRX_ID_PREP_PRINTF(lock->trx->id));
4270
4198
 
4271
4199
        if (lock_get_mode(lock) == LOCK_S) {
4272
4200
                fputs(" lock mode S", file);
4298
4226
 
4299
4227
        block = buf_page_try_get(space, page_no, &mtr);
4300
4228
 
4301
 
        for (i = 0; i < lock_rec_get_n_bits(lock); ++i) {
4302
 
 
4303
 
                if (!lock_rec_get_nth_bit(lock, i)) {
4304
 
                        continue;
4305
 
                }
4306
 
 
4307
 
                fprintf(file, "Record lock, heap no %lu", (ulong) i);
4308
 
 
4309
 
                if (block) {
4310
 
                        const rec_t*    rec;
4311
 
 
4312
 
                        rec = page_find_rec_with_heap_no(
4313
 
                                buf_block_get_frame(block), i);
4314
 
 
4315
 
                        offsets = rec_get_offsets(
4316
 
                                rec, lock->index, offsets,
4317
 
                                ULINT_UNDEFINED, &heap);
4318
 
 
4319
 
                        putc(' ', file);
4320
 
                        rec_print_new(file, rec, offsets);
4321
 
                }
4322
 
 
4323
 
                putc('\n', file);
 
4229
        if (block) {
 
4230
                for (i = 0; i < lock_rec_get_n_bits(lock); i++) {
 
4231
 
 
4232
                        if (lock_rec_get_nth_bit(lock, i)) {
 
4233
 
 
4234
                                const rec_t*    rec
 
4235
                                        = page_find_rec_with_heap_no(
 
4236
                                                buf_block_get_frame(block), i);
 
4237
                                offsets = rec_get_offsets(
 
4238
                                        rec, lock->index, offsets,
 
4239
                                        ULINT_UNDEFINED, &heap);
 
4240
 
 
4241
                                fprintf(file, "Record lock, heap no %lu ",
 
4242
                                        (ulong) i);
 
4243
                                rec_print_new(file, rec, offsets);
 
4244
                                putc('\n', file);
 
4245
                        }
 
4246
                }
 
4247
        } else {
 
4248
                for (i = 0; i < lock_rec_get_n_bits(lock); i++) {
 
4249
                        fprintf(file, "Record lock, heap no %lu\n", (ulong) i);
 
4250
                }
4324
4251
        }
4325
4252
 
4326
4253
        mtr_commit(&mtr);
4367
4294
#endif /* PRINT_NUM_OF_LOCK_STRUCTS */
4368
4295
 
4369
4296
/*********************************************************************//**
4370
 
Prints info of locks for all transactions.
4371
 
@return FALSE if not able to obtain kernel mutex
4372
 
and exits without printing info */
 
4297
Prints info of locks for all transactions. */
4373
4298
UNIV_INTERN
4374
 
ibool
 
4299
void
4375
4300
lock_print_info_summary(
4376
4301
/*====================*/
4377
 
        FILE*   file,   /*!< in: file where to print */
4378
 
        ibool   nowait) /*!< in: whether to wait for the kernel mutex */
 
4302
        FILE*   file)   /*!< in: file where to print */
4379
4303
{
4380
 
        /* if nowait is FALSE, wait on the kernel mutex,
4381
 
        otherwise return immediately if fail to obtain the
4382
 
        mutex. */
4383
 
        if (!nowait) {
4384
 
                lock_mutex_enter_kernel();
4385
 
        } else if (mutex_enter_nowait(&kernel_mutex)) {
4386
 
                fputs("FAIL TO OBTAIN KERNEL MUTEX, "
4387
 
                      "SKIP LOCK INFO PRINTING\n", file);
4388
 
                return(FALSE);
4389
 
        }
 
4304
        /* We must protect the MySQL thd->query field with a MySQL mutex, and
 
4305
        because the MySQL mutex must be reserved before the kernel_mutex of
 
4306
        InnoDB, we call innobase_mysql_prepare_print_arbitrary_thd() here. */
 
4307
 
 
4308
        innobase_mysql_prepare_print_arbitrary_thd();
 
4309
        lock_mutex_enter_kernel();
4390
4310
 
4391
4311
        if (lock_deadlock_found) {
4392
4312
                fputs("------------------------\n"
4401
4321
              "------------\n", file);
4402
4322
 
4403
4323
        fprintf(file, "Trx id counter " TRX_ID_FMT "\n",
4404
 
                trx_sys->max_trx_id);
 
4324
                TRX_ID_PREP_PRINTF(trx_sys->max_trx_id));
4405
4325
 
4406
4326
        fprintf(file,
4407
4327
                "Purge done for trx's n:o < " TRX_ID_FMT
4408
4328
                " undo n:o < " TRX_ID_FMT "\n",
4409
 
                purge_sys->purge_trx_no,
4410
 
                purge_sys->purge_undo_no);
 
4329
                TRX_ID_PREP_PRINTF(purge_sys->purge_trx_no),
 
4330
                TRX_ID_PREP_PRINTF(purge_sys->purge_undo_no));
4411
4331
 
4412
4332
        fprintf(file,
4413
4333
                "History list length %lu\n",
4418
4338
                "Total number of lock structs in row lock hash table %lu\n",
4419
4339
                (ulong) lock_get_n_rec_locks());
4420
4340
#endif /* PRINT_NUM_OF_LOCK_STRUCTS */
4421
 
        return(TRUE);
4422
4341
}
4423
4342
 
4424
4343
/*********************************************************************//**
4469
4388
 
4470
4389
        if (trx == NULL) {
4471
4390
                lock_mutex_exit_kernel();
 
4391
                innobase_mysql_end_print_arbitrary_thd();
4472
4392
 
4473
4393
                ut_ad(lock_validate());
4474
4394
 
4484
4404
                                "Trx read view will not see trx with"
4485
4405
                                " id >= " TRX_ID_FMT
4486
4406
                                ", sees < " TRX_ID_FMT "\n",
4487
 
                                trx->read_view->low_limit_id,
4488
 
                                trx->read_view->up_limit_id);
 
4407
                                TRX_ID_PREP_PRINTF(
 
4408
                                        trx->read_view->low_limit_id),
 
4409
                                TRX_ID_PREP_PRINTF(
 
4410
                                        trx->read_view->up_limit_id));
4489
4411
                }
4490
4412
 
4491
4413
                if (trx->que_state == TRX_QUE_LOCK_WAIT) {
4550
4472
                        }
4551
4473
 
4552
4474
                        lock_mutex_exit_kernel();
 
4475
                        innobase_mysql_end_print_arbitrary_thd();
4553
4476
 
4554
4477
                        mtr_start(&mtr);
4555
4478
 
4560
4483
 
4561
4484
                        load_page_first = FALSE;
4562
4485
 
 
4486
                        innobase_mysql_prepare_print_arbitrary_thd();
4563
4487
                        lock_mutex_enter_kernel();
4564
4488
 
4565
4489
                        goto loop;
4697
4621
                        ut_a(lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP,
4698
4622
                                               block, heap_no, impl_trx));
4699
4623
                }
4700
 
#if 0
4701
4624
        } else {
4702
4625
 
4703
4626
                /* The kernel mutex may get released temporarily in the
4704
4627
                next function call: we have to release lock table mutex
4705
4628
                to obey the latching order */
4706
4629
 
4707
 
                /* If this thread is holding the file space latch
4708
 
                (fil_space_t::latch), the following check WILL break
4709
 
                latching order and may cause a deadlock of threads. */
4710
 
 
4711
 
                /* NOTE: This is a bogus check that would fail in the
4712
 
                following case: Our transaction is updating a
4713
 
                row. After it has updated the clustered index record,
4714
 
                it goes to a secondary index record and finds someone
4715
 
                else holding an explicit S- or X-lock on that
4716
 
                secondary index record, presumably from a locking
4717
 
                read. Our transaction cannot update the secondary
4718
 
                index immediately, but places a waiting X-lock request
4719
 
                on the secondary index record. There is nothing
4720
 
                illegal in this. The assertion is simply too strong. */
4721
 
 
4722
 
                /* From the locking point of view, each secondary
4723
 
                index is a separate table. A lock that is held on
4724
 
                secondary index rec does not give any rights to modify
4725
 
                or read the clustered index rec. Therefore, we can
4726
 
                think of the sec index as a separate 'table' from the
4727
 
                clust index 'table'. Conversely, a transaction that
4728
 
                has acquired a lock on and modified a clustered index
4729
 
                record may need to wait for a lock on the
4730
 
                corresponding record in a secondary index. */
4731
 
 
4732
4630
                impl_trx = lock_sec_rec_some_has_impl_off_kernel(
4733
4631
                        rec, index, offsets);
4734
4632
 
4739
4637
                        ut_a(lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP,
4740
4638
                                               block, heap_no, impl_trx));
4741
4639
                }
4742
 
#endif
4743
4640
        }
4744
4641
 
4745
4642
        lock = lock_rec_get_first(block, heap_no);
4837
4734
             || lock->trx->conc_state == TRX_PREPARED
4838
4735
             || lock->trx->conc_state == TRX_COMMITTED_IN_MEMORY);
4839
4736
 
4840
 
# ifdef UNIV_SYNC_DEBUG
4841
 
        /* Only validate the record queues when this thread is not
4842
 
        holding a space->latch.  Deadlocks are possible due to
4843
 
        latching order violation when UNIV_DEBUG is defined while
4844
 
        UNIV_SYNC_DEBUG is not. */
4845
 
        if (!sync_thread_levels_contains(SYNC_FSP))
4846
 
# endif /* UNIV_SYNC_DEBUG */
4847
4737
        for (i = nth_bit; i < lock_rec_get_n_bits(lock); i++) {
4848
4738
 
4849
4739
                if (i == 1 || lock_rec_get_nth_bit(lock, i)) {
4853
4743
                        ut_a(rec);
4854
4744
                        offsets = rec_get_offsets(rec, index, offsets,
4855
4745
                                                  ULINT_UNDEFINED, &heap);
4856
 
#if 0
 
4746
 
4857
4747
                        fprintf(stderr,
4858
4748
                                "Validating %lu %lu\n",
4859
4749
                                (ulong) space, (ulong) page_no);
4860
 
#endif
 
4750
 
4861
4751
                        lock_mutex_exit_kernel();
4862
4752
 
4863
 
                        /* If this thread is holding the file space
4864
 
                        latch (fil_space_t::latch), the following
4865
 
                        check WILL break the latching order and may
4866
 
                        cause a deadlock of threads. */
4867
 
 
4868
4753
                        lock_rec_queue_validate(block, rec, index, offsets);
4869
4754
 
4870
4755
                        lock_mutex_enter_kernel();
4899
4784
lock_validate(void)
4900
4785
/*===============*/
4901
4786
{
4902
 
        lock_t*         lock;
4903
 
        trx_t*          trx;
4904
 
        ib_uint64_t     limit;
4905
 
        ulint           space;
4906
 
        ulint           page_no;
4907
 
        ulint           i;
 
4787
        lock_t* lock;
 
4788
        trx_t*  trx;
 
4789
        dulint  limit;
 
4790
        ulint   space;
 
4791
        ulint   page_no;
 
4792
        ulint   i;
4908
4793
 
4909
4794
        lock_mutex_enter_kernel();
4910
4795
 
4928
4813
 
4929
4814
        for (i = 0; i < hash_get_n_cells(lock_sys->rec_hash); i++) {
4930
4815
 
4931
 
                limit = 0;
 
4816
                limit = ut_dulint_zero;
4932
4817
 
4933
4818
                for (;;) {
4934
4819
                        lock = HASH_GET_FIRST(lock_sys->rec_hash, i);
4935
4820
 
4936
4821
                        while (lock) {
4937
 
                                ib_uint64_t     space_page;
4938
4822
                                ut_a(trx_in_trx_list(lock->trx));
4939
4823
 
4940
4824
                                space = lock->un_member.rec_lock.space;
4941
4825
                                page_no = lock->un_member.rec_lock.page_no;
4942
4826
 
4943
 
                                space_page = ut_ull_create(space, page_no);
4944
 
 
4945
 
                                if (space_page >= limit) {
 
4827
                                if (ut_dulint_cmp(
 
4828
                                            ut_dulint_create(space, page_no),
 
4829
                                            limit) >= 0) {
4946
4830
                                        break;
4947
4831
                                }
4948
4832
 
4960
4844
 
4961
4845
                        lock_mutex_enter_kernel();
4962
4846
 
4963
 
                        limit = ut_ull_create(space, page_no + 1);
 
4847
                        limit = ut_dulint_create(space, page_no + 1);
4964
4848
                }
4965
4849
        }
4966
4850
 
5052
4936
        on the successor, which produced an unnecessary deadlock. */
5053
4937
 
5054
4938
        if (lock_rec_other_has_conflicting(
5055
 
                    static_cast<lock_mode>(LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION),
 
4939
                    LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION,
5056
4940
                    block, next_rec_heap_no, trx)) {
5057
4941
 
5058
4942
                /* Note that we may get DB_SUCCESS also here! */
5066
4950
 
5067
4951
        lock_mutex_exit_kernel();
5068
4952
 
5069
 
        switch (err) {
5070
 
        case DB_SUCCESS_LOCKED_REC:
5071
 
                err = DB_SUCCESS;
5072
 
                /* fall through */
5073
 
        case DB_SUCCESS:
5074
 
                if (dict_index_is_clust(index)) {
5075
 
                        break;
5076
 
                }
 
4953
        if ((err == DB_SUCCESS) && !dict_index_is_clust(index)) {
5077
4954
                /* Update the page max trx id field */
5078
4955
                page_update_max_trx_id(block,
5079
4956
                                       buf_block_get_page_zip(block),
5196
5073
 
5197
5074
        ut_ad(lock_rec_queue_validate(block, rec, index, offsets));
5198
5075
 
5199
 
        if (UNIV_UNLIKELY(err == DB_SUCCESS_LOCKED_REC)) {
5200
 
                err = DB_SUCCESS;
5201
 
        }
5202
 
 
5203
5076
        return(err);
5204
5077
}
5205
5078
 
5266
5139
        }
5267
5140
#endif /* UNIV_DEBUG */
5268
5141
 
5269
 
        if (err == DB_SUCCESS || err == DB_SUCCESS_LOCKED_REC) {
 
5142
        if (err == DB_SUCCESS) {
5270
5143
                /* Update the page max trx id field */
5271
 
                /* It might not be necessary to do this if
5272
 
                err == DB_SUCCESS (no new lock created),
5273
 
                but it should not cost too much performance. */
5274
5144
                page_update_max_trx_id(block,
5275
5145
                                       buf_block_get_page_zip(block),
5276
5146
                                       thr_get_trx(thr)->id, mtr);
5277
 
                err = DB_SUCCESS;
5278
5147
        }
5279
5148
 
5280
5149
        return(err);
5281
5150
}
5282
5151
 
5283
5152
/*********************************************************************//**
5284
 
Like lock_clust_rec_read_check_and_lock(), but reads a
 
5153
Like the counterpart for a clustered index below, but now we read a
5285
5154
secondary index record.
5286
 
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK,
5287
 
or DB_QUE_THR_SUSPENDED */
 
5155
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
5288
5156
UNIV_INTERN
5289
 
enum db_err
 
5157
ulint
5290
5158
lock_sec_rec_read_check_and_lock(
5291
5159
/*=============================*/
5292
5160
        ulint                   flags,  /*!< in: if BTR_NO_LOCKING_FLAG
5307
5175
                                        LOCK_REC_NOT_GAP */
5308
5176
        que_thr_t*              thr)    /*!< in: query thread */
5309
5177
{
5310
 
        enum db_err     err;
5311
 
        ulint           heap_no;
 
5178
        ulint   err;
 
5179
        ulint   heap_no;
5312
5180
 
5313
5181
        ut_ad(!dict_index_is_clust(index));
5314
5182
        ut_ad(block->frame == page_align(rec));
5334
5202
        if the max trx id for the page >= min trx id for the trx list or a
5335
5203
        database recovery is running. */
5336
5204
 
5337
 
        if ((page_get_max_trx_id(block->frame) >= trx_list_get_min_trx_id()
 
5205
        if (((ut_dulint_cmp(page_get_max_trx_id(block->frame),
 
5206
                            trx_list_get_min_trx_id()) >= 0)
5338
5207
             || recv_recovery_is_on())
5339
5208
            && !page_rec_is_supremum(rec)) {
5340
5209
 
5348
5217
 
5349
5218
        ut_ad(lock_rec_queue_validate(block, rec, index, offsets));
5350
5219
 
5351
 
        if (UNIV_UNLIKELY(err == DB_SUCCESS_LOCKED_REC)) {
5352
 
                err = DB_SUCCESS;
5353
 
        }
5354
 
 
5355
5220
        return(err);
5356
5221
}
5357
5222
 
5362
5227
puts the transaction and the query thread to the lock wait state and inserts a
5363
5228
waiting request for a record lock to the lock queue. Sets the requested mode
5364
5229
lock on the record.
5365
 
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK,
5366
 
or DB_QUE_THR_SUSPENDED */
 
5230
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
5367
5231
UNIV_INTERN
5368
 
enum db_err
 
5232
ulint
5369
5233
lock_clust_rec_read_check_and_lock(
5370
5234
/*===============================*/
5371
5235
        ulint                   flags,  /*!< in: if BTR_NO_LOCKING_FLAG
5386
5250
                                        LOCK_REC_NOT_GAP */
5387
5251
        que_thr_t*              thr)    /*!< in: query thread */
5388
5252
{
5389
 
        enum db_err     err;
5390
 
        ulint           heap_no;
 
5253
        ulint   err;
 
5254
        ulint   heap_no;
5391
5255
 
5392
5256
        ut_ad(dict_index_is_clust(index));
5393
5257
        ut_ad(block->frame == page_align(rec));
5458
5322
        mem_heap_t*     tmp_heap        = NULL;
5459
5323
        ulint           offsets_[REC_OFFS_NORMAL_SIZE];
5460
5324
        ulint*          offsets         = offsets_;
5461
 
        ulint           err;
 
5325
        ulint           ret;
5462
5326
        rec_offs_init(offsets_);
5463
5327
 
5464
5328
        offsets = rec_get_offsets(rec, index, offsets,
5465
5329
                                  ULINT_UNDEFINED, &tmp_heap);
5466
 
        err = lock_clust_rec_read_check_and_lock(flags, block, rec, index,
 
5330
        ret = lock_clust_rec_read_check_and_lock(flags, block, rec, index,
5467
5331
                                                 offsets, mode, gap_mode, thr);
5468
5332
        if (tmp_heap) {
5469
5333
                mem_heap_free(tmp_heap);
5470
5334
        }
5471
 
 
5472
 
        if (UNIV_UNLIKELY(err == DB_SUCCESS_LOCKED_REC)) {
5473
 
                err = DB_SUCCESS;
5474
 
        }
5475
 
 
5476
 
        return(err);
 
5335
        return(ret);
5477
5336
}
5478
5337
 
5479
5338
/*******************************************************************//**
5492
5351
 
5493
5352
        /* The lock to be release must be the last lock acquired. */
5494
5353
        last = ib_vector_size(autoinc_locks) - 1;
5495
 
        lock = static_cast<lock_t *>(ib_vector_get(autoinc_locks, last));
 
5354
        lock = ib_vector_get(autoinc_locks, last);
5496
5355
 
5497
5356
        /* Should have only AUTOINC locks in the vector. */
5498
5357
        ut_a(lock_get_mode(lock) == LOCK_AUTO_INC);
5505
5364
}
5506
5365
 
5507
5366
/*******************************************************************//**
5508
 
Check if a transaction holds any autoinc locks. 
5509
 
@return TRUE if the transaction holds any AUTOINC locks. */
5510
 
UNIV_INTERN
5511
 
ibool
5512
 
lock_trx_holds_autoinc_locks(
5513
 
/*=========================*/
5514
 
        const trx_t*    trx)            /*!< in: transaction */
5515
 
{
5516
 
        ut_a(trx->autoinc_locks != NULL);
5517
 
 
5518
 
        return(!ib_vector_is_empty(trx->autoinc_locks));
5519
 
}
5520
 
 
5521
 
/*******************************************************************//**
5522
5367
Release all the transaction's autoinc locks. */
5523
5368
UNIV_INTERN
5524
5369
void
5561
5406
Gets the id of the transaction owning a lock.
5562
5407
@return transaction id */
5563
5408
UNIV_INTERN
5564
 
trx_id_t
 
5409
ullint
5565
5410
lock_get_trx_id(
5566
5411
/*============*/
5567
5412
        const lock_t*   lock)   /*!< in: lock */
5568
5413
{
5569
 
        return(lock->trx->id);
 
5414
        return(trx_get_id(lock->trx));
5570
5415
}
5571
5416
 
5572
5417
/*******************************************************************//**
5660
5505
Gets the id of the table on which the lock is.
5661
5506
@return id of the table */
5662
5507
UNIV_INTERN
5663
 
table_id_t
 
5508
ullint
5664
5509
lock_get_table_id(
5665
5510
/*==============*/
5666
5511
        const lock_t*   lock)   /*!< in: lock */
5669
5514
 
5670
5515
        table = lock_get_table(lock);
5671
5516
 
5672
 
        return(table->id);
 
5517
        return((ullint)ut_conv_dulint_to_longlong(table->id));
5673
5518
}
5674
5519
 
5675
5520
/*******************************************************************//**