~drizzle-trunk/drizzle/development

« back to all changes in this revision

Viewing changes to plugin/innobase/lock/lock0lock.c

  • Committer: Brian Aker
  • Date: 2010-12-19 06:20:54 UTC
  • mfrom: (2005.1.1 bug673105)
  • Revision ID: brian@tangent.org-20101219062054-1kt0l3dxs4z2z8md
Merge Dave.

Show diffs side-by-side

added

removed

Lines of Context:
566
566
/*============*/
567
567
        ulint   n_cells)        /*!< in: number of slots in lock hash table */
568
568
{
569
 
        lock_sys = static_cast<lock_sys_t *>(mem_alloc(sizeof(lock_sys_t)));
 
569
        lock_sys = mem_alloc(sizeof(lock_sys_t));
570
570
 
571
571
        lock_sys->rec_hash = hash_create(n_cells);
572
572
 
615
615
{
616
616
        ut_ad(lock);
617
617
 
618
 
        return static_cast<lock_mode>(lock->type_mode & LOCK_MODE_MASK);
 
618
        return(lock->type_mode & LOCK_MODE_MASK);
619
619
}
620
620
 
621
621
/*********************************************************************//**
918
918
        ut_ad(lock_get_type_low(lock2) == LOCK_REC);
919
919
 
920
920
        if (trx != lock2->trx
921
 
            && !lock_mode_compatible(static_cast<lock_mode>(LOCK_MODE_MASK & type_mode),
 
921
            && !lock_mode_compatible(LOCK_MODE_MASK & type_mode,
922
922
                                     lock_get_mode(lock2))) {
923
923
 
924
924
                /* We have somewhat complex rules when gap type record locks
1113
1113
        page_no = lock->un_member.rec_lock.page_no;
1114
1114
 
1115
1115
        for (;;) {
1116
 
                lock = static_cast<lock_t *>(HASH_GET_NEXT(hash, lock));
 
1116
                lock = HASH_GET_NEXT(hash, lock);
1117
1117
 
1118
1118
                if (!lock) {
1119
1119
 
1145
1145
 
1146
1146
        ut_ad(mutex_own(&kernel_mutex));
1147
1147
 
1148
 
        lock = static_cast<lock_t *>(HASH_GET_FIRST(lock_sys->rec_hash,
1149
 
                              lock_rec_hash(space, page_no)));
 
1148
        lock = HASH_GET_FIRST(lock_sys->rec_hash,
 
1149
                              lock_rec_hash(space, page_no));
1150
1150
        while (lock) {
1151
1151
                if ((lock->un_member.rec_lock.space == space)
1152
1152
                    && (lock->un_member.rec_lock.page_no == page_no)) {
1154
1154
                        break;
1155
1155
                }
1156
1156
 
1157
 
                lock = static_cast<lock_t *>(HASH_GET_NEXT(hash, lock));
 
1157
                lock = HASH_GET_NEXT(hash, lock);
1158
1158
        }
1159
1159
 
1160
1160
        return(lock);
1204
1204
 
1205
1205
        hash = buf_block_get_lock_hash_val(block);
1206
1206
 
1207
 
        lock = static_cast<lock_t *>(HASH_GET_FIRST(lock_sys->rec_hash, hash));
 
1207
        lock = HASH_GET_FIRST(lock_sys->rec_hash, hash);
1208
1208
 
1209
1209
        while (lock) {
1210
1210
                if ((lock->un_member.rec_lock.space == space)
1213
1213
                        break;
1214
1214
                }
1215
1215
 
1216
 
                lock = static_cast<lock_t *>(HASH_GET_NEXT(hash, lock));
 
1216
                lock = HASH_GET_NEXT(hash, lock);
1217
1217
        }
1218
1218
 
1219
1219
        return(lock);
1303
1303
 
1304
1304
        size = sizeof(lock_t) + lock_rec_get_n_bits(lock) / 8;
1305
1305
 
1306
 
        return static_cast<lock_t *>(mem_heap_dup(heap, lock, size));
 
1306
        return(mem_heap_dup(heap, lock, size));
1307
1307
}
1308
1308
 
1309
1309
/*********************************************************************//**
1418
1418
        while (lock) {
1419
1419
                if (lock->trx == trx
1420
1420
                    && lock_mode_stronger_or_eq(lock_get_mode(lock),
1421
 
                                                static_cast<lock_mode>(precise_mode & LOCK_MODE_MASK))
 
1421
                                                precise_mode & LOCK_MODE_MASK)
1422
1422
                    && !lock_get_wait(lock)
1423
1423
                    && (!lock_rec_get_rec_not_gap(lock)
1424
1424
                        || (precise_mode & LOCK_REC_NOT_GAP)
1694
1694
        n_bits = page_dir_get_n_heap(page) + LOCK_PAGE_BITMAP_MARGIN;
1695
1695
        n_bytes = 1 + n_bits / 8;
1696
1696
 
1697
 
        lock = static_cast<lock_t *>(mem_heap_alloc(trx->lock_heap, sizeof(lock_t) + n_bytes));
 
1697
        lock = mem_heap_alloc(trx->lock_heap, sizeof(lock_t) + n_bytes);
1698
1698
 
1699
1699
        UT_LIST_ADD_LAST(trx_locks, trx->trx_locks, lock);
1700
1700
 
2050
2050
                /* The trx already has a strong enough lock on rec: do
2051
2051
                nothing */
2052
2052
 
2053
 
        } else if (lock_rec_other_has_conflicting(static_cast<lock_mode>(mode), block, heap_no, trx)) {
 
2053
        } else if (lock_rec_other_has_conflicting(mode, block, heap_no, trx)) {
2054
2054
 
2055
2055
                /* If another transaction has a non-gap conflicting request in
2056
2056
                the queue, as this transaction does not have a lock strong
3595
3595
 
3596
3596
                ib_vector_push(trx->autoinc_locks, lock);
3597
3597
        } else {
3598
 
                lock = static_cast<lock_t *>(mem_heap_alloc(trx->lock_heap, sizeof(lock_t)));
 
3598
                lock = mem_heap_alloc(trx->lock_heap, sizeof(lock_t));
3599
3599
        }
3600
3600
 
3601
3601
        UT_LIST_ADD_LAST(trx_locks, trx->trx_locks, lock);
3656
3656
                    && !ib_vector_is_empty(trx->autoinc_locks)) {
3657
3657
                        lock_t* autoinc_lock;
3658
3658
 
3659
 
                        autoinc_lock = static_cast<lock_t *>(ib_vector_pop(trx->autoinc_locks));
 
3659
                        autoinc_lock = ib_vector_pop(trx->autoinc_locks);
3660
3660
                        ut_a(autoinc_lock == lock);
3661
3661
                }
3662
3662
 
4853
4853
                        ut_a(rec);
4854
4854
                        offsets = rec_get_offsets(rec, index, offsets,
4855
4855
                                                  ULINT_UNDEFINED, &heap);
4856
 
#if 0
 
4856
 
4857
4857
                        fprintf(stderr,
4858
4858
                                "Validating %lu %lu\n",
4859
4859
                                (ulong) space, (ulong) page_no);
4860
 
#endif
 
4860
 
4861
4861
                        lock_mutex_exit_kernel();
4862
4862
 
4863
4863
                        /* If this thread is holding the file space
5052
5052
        on the successor, which produced an unnecessary deadlock. */
5053
5053
 
5054
5054
        if (lock_rec_other_has_conflicting(
5055
 
                    static_cast<lock_mode>(LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION),
 
5055
                    LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION,
5056
5056
                    block, next_rec_heap_no, trx)) {
5057
5057
 
5058
5058
                /* Note that we may get DB_SUCCESS also here! */
5492
5492
 
5493
5493
        /* The lock to be released must be the last lock acquired. */
5494
5494
        last = ib_vector_size(autoinc_locks) - 1;
5495
 
        lock = static_cast<lock_t *>(ib_vector_get(autoinc_locks, last));
 
5495
        lock = ib_vector_get(autoinc_locks, last);
5496
5496
 
5497
5497
        /* Should have only AUTOINC locks in the vector. */
5498
5498
        ut_a(lock_get_mode(lock) == LOCK_AUTO_INC);