#include "btr0btr.h"
/* Flag: has the search system been disabled? */
UNIV_INTERN ibool btr_search_disabled = FALSE;
/* A dummy variable to fool the compiler */
UNIV_INTERN ulint btr_search_this_is_zero = 0;
#ifdef UNIV_SEARCH_PERF_STAT
UNIV_INTERN ulint btr_search_n_succ = 0;
UNIV_INTERN ulint btr_search_n_hash_fail = 0;
#endif /* UNIV_SEARCH_PERF_STAT */
/* padding to prevent other memory update
hotspots from residing on the same memory
cache line as btr_search_latch */
UNIV_INTERN byte btr_sea_pad1[64];
/* The latch protecting the adaptive search system: this latch protects the
(1) positions of records on those pages where a hash index has been built.
NOTE: It does not protect values of non-ordering fields within a record from
being updated in-place! We can use fact (1) to perform unique searches to
index records. */
/* We will allocate the latch from dynamic memory to get it to the
same DRAM page as other hotspot semaphores */
UNIV_INTERN rw_lock_t* btr_search_latch_temp;
/* padding to prevent other memory update hotspots from residing on
the same memory cache line */
UNIV_INTERN byte btr_sea_pad2[64];
UNIV_INTERN btr_search_sys_t* btr_search_sys;
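
/* Note on layout: btr_sea_pad1 above and btr_sea_pad2 below are meant to
keep btr_search_latch on a cache line of its own; the 64-byte pad size is
presumably sized to cover one cache line on common processors. */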
/* If the number of records on the page divided by this parameter
would have been successfully accessed using a hash index, the index
is then built on the page, assuming the global limit has been reached */

static
void
btr_search_build_page_hash_index(
/*=============================*/
dict_index_t* index, /* in: index for which to build, or NULL if
buf_block_t* block, /* in: index page, s- or x-latched */
ulint n_fields,/* in: hash this many full fields */
ulint n_bytes,/* in: hash this many bytes from the next
field */
ibool left_side);/* in: hash for searches from left side? */

be enough free space in the hash table. */
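
/* What follows is a double-checked allocation: the block is allocated
without holding btr_search_latch, the free_block pointer is then re-checked
under the x-latch, and the block is returned to the buffer pool if some
other thread installed one in the meantime. */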
if (heap->free_block == NULL) {
buf_block_t* block = buf_block_alloc(0);
rw_lock_x_lock(&btr_search_latch);
if (heap->free_block == NULL) {
heap->free_block = block;
} else {
buf_block_free(block);
}

rw_lock_x_unlock(&btr_search_latch);
}
}

btr_search_sys = mem_alloc(sizeof(btr_search_sys_t));
btr_search_sys->hash_index = ha_create(hash_size, 0, 0);
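/* The first argument to ha_create() is the number of cells; the two zero
arguments are understood here to mean that the table gets no mutex array of
its own, since all access goes through btr_search_latch. */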
/************************************************************************
Disable the adaptive hash search system and empty the index. */
UNIV_INTERN
void
btr_search_disable(void)
/*====================*/
{
btr_search_disabled = TRUE;
rw_lock_x_lock(&btr_search_latch);
ha_clear(btr_search_sys->hash_index);
rw_lock_x_unlock(&btr_search_latch);
}

/************************************************************************
Enable the adaptive hash search system. */
UNIV_INTERN
void
btr_search_enable(void)
/*====================*/
{
btr_search_disabled = FALSE;
}

/*********************************************************************
Creates and initializes a search info struct. */
UNIV_INTERN
btr_search_t*
btr_search_info_create(
/*===================*/
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
ut_ad(rw_lock_own(&block->lock, RW_LOCK_SHARED)
|| rw_lock_own(&block->lock, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
info->last_hash_succ = FALSE;
ut_a(buf_block_state_valid(block));
ut_ad(info->magic_n == BTR_SEARCH_MAGIC_N);
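
/* The counters checked below implement the build heuristic: a page gets a
hash index only after the recommended prefix (n_fields/n_bytes) has
predicted the search position often enough, cf. the build-limit comment
near the top of this file. */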
if ((block->n_hash_helps > 0)
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
|| rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(page_align(btr_cur_get_rec(cursor))
== buf_block_get_frame(block));
if (!block->is_hashed) {

return;
}

ut_a(block->index == cursor->index);
ut_a(!dict_index_is_ibuf(cursor->index));
if ((info->n_hash_potential > 0)
&& (block->curr_n_fields == info->n_fields)
&& (block->curr_n_bytes == info->n_bytes)
&& (block->curr_left_side == info->left_side)) {
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs_init(offsets_);
rec = btr_cur_get_rec(cursor);
ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
ha_insert_for_fold(btr_search_sys->hash_index, fold,
block, rec);
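
/* The fold value is a hash computed over the first n_fields full fields
and n_bytes of the next field of the record, so any tuple with an equal
prefix lands in the same hash cell. For illustration (a sketch of caller
usage, not part of this function), a search tuple would be folded along
the lines of:
	fold = dtuple_fold(tuple, cursor->n_fields, cursor->n_bytes, index_id);
before probing the table. */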
/*************************************************************************
Updates the search info. */
UNIV_INTERN
void
btr_search_info_update_slow(
/*========================*/
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
block = btr_cur_get_block(cursor);
/* NOTE that the following two function calls do NOT protect
info or block->n_fields etc. with any semaphore, to save CPU time!
protected, not the next or previous record
in the chain: we cannot look at the next or
previous record to check our guess! */
const dtuple_t* tuple, /* in: data tuple */
ulint mode, /* in: PAGE_CUR_L, PAGE_CUR_LE, PAGE_CUR_G,
or PAGE_CUR_GE */
mtr_t* mtr) /* in: mtr */
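
/* A sketch of the check in the elided body that follows: the tuple is
compared against the record under the cursor, and only if that comparison
alone is inconclusive are the neighbouring records consulted, subject to
the can_only_compare_to_cursor_rec restriction described above. */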
prev_rec = page_rec_get_prev(rec);
if (page_rec_is_infimum(prev_rec)) {
success = btr_page_get_prev(page_align(prev_rec), mtr)
== FIL_NULL;
next_rec = page_rec_get_next(rec);
if (page_rec_is_supremum(next_rec)) {
if (btr_page_get_next(page_align(next_rec), mtr)
== FIL_NULL) {
cursor->up_match = 0;
of the index. Note that if mode is PAGE_CUR_LE, which is used in inserts,
and the function returns TRUE, then cursor->up_match and cursor->low_match
both have sensible values. */
UNIV_INTERN
ibool
btr_search_guess_on_hash(
/*=====================*/
/* out: TRUE if succeeded */
dict_index_t* index, /* in: index */
btr_search_t* info, /* in: index search info */
const dtuple_t* tuple, /* in: logical record */
ulint mode, /* in: PAGE_CUR_L, ... */
ulint latch_mode, /* in: BTR_SEARCH_LEAF, ...;
NOTE that only if has_search_latch
cursor->n_fields = info->n_fields;
cursor->n_bytes = info->n_bytes;
if (UNIV_UNLIKELY(dtuple_get_n_fields(tuple)
< cursor->n_fields + (cursor->n_bytes > 0))) {
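/* The guess cannot be used when the tuple is shorter than the hashed
prefix: n_fields full fields plus, when n_bytes > 0, one more field that is
hashed partially, which is what the + (cursor->n_bytes > 0) term counts. */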
goto failure_unlock;
page = page_align(rec);
{
ulint page_no = page_get_page_no(page);
ulint space_id = page_get_space_id(page);
buf_pool_mutex_enter();
block = (buf_block_t*) buf_page_hash_get(space_id, page_no);
buf_pool_mutex_exit();
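/* The record pointer is mapped back to its buffer block through the
buffer pool page hash. The lookup itself is consistent under the buffer
pool mutex, but the block may be freed as soon as the mutex is released,
hence the state check below. */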
}

if (UNIV_UNLIKELY(!block)
|| UNIV_UNLIKELY(buf_block_get_state(block)
!= BUF_BLOCK_FILE_PAGE)) {
/* The block is most probably being freed.
The function buf_LRU_search_and_free_block()
first removes the block from buf_pool->page_hash
by calling buf_LRU_block_remove_hashed_page().
After that, it invokes btr_search_drop_page_hash_index().
Let us pretend that the block was also removed from
the adaptive hash index. */

goto failure_unlock;
}

if (UNIV_LIKELY(!has_search_latch)) {
if (UNIV_UNLIKELY(
!buf_page_get_known_nowait(latch_mode, block,
BUF_MAKE_YOUNG,
__FILE__, __LINE__,
mtr))) {
goto failure_unlock;
}

rw_lock_s_unlock(&btr_search_latch);
#ifdef UNIV_SYNC_DEBUG
buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
#endif /* UNIV_SYNC_DEBUG */
}

if (UNIV_UNLIKELY(buf_block_get_state(block)
== BUF_BLOCK_REMOVE_HASH)) {
if (UNIV_LIKELY(!has_search_latch)) {
btr_leaf_page_release(block, latch_mode, mtr);
}

goto failure;
}

ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_ad(page_rec_is_user_rec(rec));
btr_cur_position(index, rec, block, cursor);
/* Check the validity of the guess within the page */
if (UNIV_EXPECT(
ut_dulint_cmp(index_id, btr_page_get_index_id(page)), 0)
|| !btr_search_check_guess(cursor,
can_only_compare_to_cursor_rec,
tuple, mode, mtr)) {
if (UNIV_LIKELY(!has_search_latch)) {
btr_leaf_page_release(block, latch_mode, mtr);
}

goto failure;
}

/* Currently, does not work if the following fails: */
ut_ad(!has_search_latch);
btr_leaf_page_release(block, latch_mode, mtr);
btr_cur_search_to_nth_level(index, 0, tuple, mode, latch_mode,
&cursor2, 0, mtr);
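
/* The hash guess was unusable, so fall back to an ordinary search that
descends from the root (level 0 is the leaf level); cursor2 receives the
result of that descent. */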
#ifdef UNIV_SEARCH_PERF_STAT
btr_search_n_succ++;
#endif /* UNIV_SEARCH_PERF_STAT */
if (UNIV_LIKELY(!has_search_latch)
&& buf_page_peek_if_too_old(&block->page)) {
buf_page_make_young(&block->page);
}

/* Increment the page get statistics though we did not really
/************************************************************************
Drops a page hash index. */
UNIV_INTERN
void
btr_search_drop_page_hash_index(
/*============================*/
buf_block_t* block) /* in: block containing index page,
s- or x-latched, or an index page
for which we know that
block->buf_fix_count == 0 */
hash_table_t* table;
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
rw_lock_s_lock(&btr_search_latch);
if (UNIV_LIKELY(!block->is_hashed)) {

rw_lock_s_unlock(&btr_search_latch);

return;
}

#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
|| rw_lock_own(&(block->lock), RW_LOCK_EX)
|| (block->page.buf_fix_count == 0));
#endif /* UNIV_SYNC_DEBUG */
n_fields = block->curr_n_fields;
n_bytes = block->curr_n_bytes;
index = block->index;
ut_a(!dict_index_is_ibuf(index));
/* NOTE: The fields of block must not be accessed after
releasing btr_search_latch, as the index page might only
be s-latched! */

/************************************************************************
Drops a page hash index when a page is freed from a fseg to the file system.
Drops possible hash index if the page happens to be in the buffer pool. */
UNIV_INTERN
void
btr_search_drop_page_hash_when_freed(
/*=================================*/
ulint space, /* in: space id */
ulint zip_size, /* in: compressed page size in bytes
or 0 for uncompressed pages */
ulint page_no) /* in: page number */
if (!buf_page_peek_if_search_hashed(space, page_no)) {

return;
}

get here. Therefore we can acquire the s-latch to the page without
having to fear a deadlock. */
block = buf_page_get_gen(space, zip_size, page_no, RW_S_LATCH, NULL,
BUF_GET_IF_IN_POOL, __FILE__, __LINE__,
&mtr);

#ifdef UNIV_SYNC_DEBUG
buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
#endif /* UNIV_SYNC_DEBUG */
btr_search_drop_page_hash_index(block);
mtr_commit(&mtr);
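
/* BUF_GET_IF_IN_POOL makes buf_page_get_gen() return the page only when it
is already resident, which is the right semantics here: a page that is not
in the buffer pool cannot be referenced by the adaptive hash index. */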
static
void
btr_search_build_page_hash_index(
/*=============================*/
dict_index_t* index, /* in: index for which to build */
buf_block_t* block, /* in: index page, s- or x-latched */
ulint n_fields,/* in: hash this many full fields */
ulint n_bytes,/* in: hash this many bytes from the next
field */
ibool left_side)/* in: hash for searches from left side? */
{
hash_table_t* table;
rec_t* next_rec;
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
rec_offs_init(offsets_);
ut_a(!dict_index_is_ibuf(index));
table = btr_search_sys->hash_index;
page = buf_block_get_frame(block);
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
index_id = btr_page_get_index_id(page);
rec = page_rec_get_next(page_get_infimum_rec(page));
offsets = rec_get_offsets(rec, index, offsets,
n_fields + (n_bytes > 0), &heap);
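
/* Offsets are computed only for the hashed prefix of the record: n_fields
whole fields, plus one more field when n_bytes of it are hashed as well.
Each record's fold would then be derived from these offsets, presumably via
rec_fold(rec, offsets, n_fields, n_bytes, index_id). */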
for (i = 0; i < n_cached; i++) {
ha_insert_for_fold(table, folds[i], block, recs[i]);
}
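
/* The fold values and record pointers were collected into the folds[] and
recs[] arrays beforehand, so that, presumably, btr_search_latch need only
be held for this tight insertion loop. */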
then the hash index for page, if any, is dropped. If new_page is not hashed,
and page is hashed, then a new hash index is built to new_page with the same
parameters as page (this often happens when a page is split). */
UNIV_INTERN
void
btr_search_move_or_delete_hash_entries(
/*===================================*/
buf_block_t* new_block, /* in: records are copied
to this page */
buf_block_t* block, /* in: index page from which
records were copied, and the
1334
1283
copied records will be deleted
1335
1284
from this page */
1336
1285
dict_index_t* index) /* in: record descriptor */
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
ut_a(!new_block->is_hashed || new_block->index == index);
ut_a(!block->is_hashed || block->index == index);
ut_a(!(new_block->is_hashed || block->is_hashed)
|| !dict_index_is_ibuf(index));
rw_lock_s_lock(&btr_search_latch);
ut_a(n_fields + n_bytes > 0);
btr_search_build_page_hash_index(index, new_block, n_fields,
n_bytes, left_side);
ut_ad(n_fields == block->curr_n_fields);
ut_ad(n_bytes == block->curr_n_bytes);
ut_ad(left_side == block->curr_left_side);
/************************************************************************
Updates the page hash index when a single record is deleted from a page. */
UNIV_INTERN
void
btr_search_update_hash_on_delete(
/*=============================*/
ulint offsets_[REC_OFFS_NORMAL_SIZE];
mem_heap_t* heap = NULL;
rec_offs_init(offsets_);
rec = btr_cur_get_rec(cursor);
block = btr_cur_get_block(cursor);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
/************************************************************************
Updates the page hash index when a single record is inserted on a page. */
UNIV_INTERN
void
btr_search_update_hash_node_on_insert(
/*==================================*/
rec = btr_cur_get_rec(cursor);
block = btr_cur_get_block(cursor);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
table = btr_search_sys->hash_index;
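
/* The new record inherits the fold slot of the record it was inserted
after: instead of a delete followed by an insert, the existing hash node is
simply repointed at the next record, i.e. the freshly inserted one. */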
ha_search_and_update_if_found(table, cursor->fold, rec,
block, page_rec_get_next(rec));
rw_lock_x_unlock(&btr_search_latch);
/************************************************************************
Updates the page hash index when a single record is inserted on a page. */
UNIV_INTERN
void
btr_search_update_hash_on_insert(
/*=============================*/
rec = btr_cur_get_rec(cursor);
block = btr_cur_get_block(cursor);
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
if (!left_side) {
ha_insert_for_fold(table, fold, block, rec);
} else {
ha_insert_for_fold(table, ins_fold, block, ins_rec);
}
if (!left_side) {
ha_insert_for_fold(table, ins_fold, block, ins_rec);
fputs("Hash insert for ", stderr);
dict_index_name_print(stderr, cursor->index);
fprintf(stderr, " fold %lu\n", ins_fold);
}

ha_insert_for_fold(table, next_fold, block, next_rec);
/* We release btr_search_latch every once in a while to
give other queries a chance to run. */
if ((i != 0) && ((i % chunk_size) == 0)) {
buf_pool_mutex_exit();
rw_lock_x_unlock(&btr_search_latch);
os_thread_yield();
rw_lock_x_lock(&btr_search_latch);
buf_pool_mutex_enter();
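/* Note the lock order on reacquisition: btr_search_latch is re-taken
before the buffer pool mutex, the reverse of the release order above. */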
}

node = hash_get_nth_cell(btr_search_sys->hash_index, i)->node;
for (; node != NULL; node = node->next) {
const buf_block_t* block;
page = page_align(node->data);
{
ulint page_no = page_get_page_no(page);
ulint space_id= page_get_space_id(page);
block = buf_block_hash_get(space_id, page_no);
}

if (UNIV_UNLIKELY(!block)) {
/* The block is most probably being freed.
The function buf_LRU_search_and_free_block()
first removes the block from
buf_pool->page_hash by calling
buf_LRU_block_remove_hashed_page().
After that, it invokes
btr_search_drop_page_hash_index().
Let us pretend that the block was also removed
from the adaptive hash index. */

continue;
}

ut_a(!dict_index_is_ibuf(block->index));
offsets = rec_get_offsets((const rec_t*) node->data,
block->index, offsets,
block->curr_n_fields
+ (block->curr_n_bytes > 0),
&heap);
"InnoDB: ptr mem address %p"
" index id %lu %lu,"
" node fold %lu, rec fold %lu\n",
(ulong) page_get_page_no(page),
node->data,
(ulong) ut_dulint_get_high(
btr_page_get_index_id(page)),
/* We release btr_search_latch every once in a while to
give other queries a chance to run. */
buf_pool_mutex_exit();
rw_lock_x_unlock(&btr_search_latch);
os_thread_yield();
rw_lock_x_lock(&btr_search_latch);
buf_pool_mutex_enter();
if (!ha_validate(btr_search_sys->hash_index, i, end_index)) {