~drizzle-trunk/drizzle/development

Viewing changes to storage/innobase/btr/btr0sea.c

  • Committer: Elan Ruusamäe
  • Date: 2008-12-04 19:38:26 UTC
  • mfrom: (644 drizzle)
  • mto: (641.3.10 devel)
  • mto: This revision was merged to the branch mainline in revision 649.
  • Revision ID: glen@haarber.alkohol.ee-20081204193826-xxyhd45ag121pf3z
- pull from trunk

/************************************************************************
The index tree adaptive search

(c) 1996 Innobase Oy

Created 2/17/1996 Heikki Tuuri
*************************************************************************/

#include "btr0sea.h"
#ifdef UNIV_NONINL
#include "btr0sea.ic"
#endif

#include "buf0buf.h"
#include "page0page.h"
#include "page0cur.h"
#include "btr0cur.h"
#include "btr0pcur.h"
#include "btr0btr.h"
#include "ha0ha.h"

/* Flag: has the search system been disabled? */
UNIV_INTERN ibool               btr_search_disabled     = FALSE;

/* A dummy variable to fool the compiler */
UNIV_INTERN ulint               btr_search_this_is_zero = 0;

#ifdef UNIV_SEARCH_PERF_STAT
UNIV_INTERN ulint               btr_search_n_succ       = 0;
UNIV_INTERN ulint               btr_search_n_hash_fail  = 0;
#endif /* UNIV_SEARCH_PERF_STAT */

/* padding to prevent other memory update
hotspots from residing on the same memory
cache line as btr_search_latch */
UNIV_INTERN byte                btr_sea_pad1[64];

/* The latch protecting the adaptive search system: this latch protects the
(1) positions of records on those pages where a hash index has been built.
NOTE: It does not protect values of non-ordering fields within a record from
being updated in-place! We can use fact (1) to perform unique searches on
indexes. */

/* We will allocate the latch from dynamic memory to get it to the
same DRAM page as other hotspot semaphores */
UNIV_INTERN rw_lock_t*          btr_search_latch_temp;

/* padding to prevent other memory update hotspots from residing on
the same memory cache line */
UNIV_INTERN byte                btr_sea_pad2[64];

UNIV_INTERN btr_search_sys_t*   btr_search_sys;

/* If more than n_recs / BTR_SEARCH_PAGE_BUILD_LIMIT of the accesses to a
page would have succeeded using a hash index, a hash index is built on the
page, assuming the global limit below has also been reached */

#define BTR_SEARCH_PAGE_BUILD_LIMIT     16

/* The global limit for consecutive potentially successful hash searches,
before hash index building is started */

#define BTR_SEARCH_BUILD_LIMIT          100
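
/* Worked example of the two limits above: on a page holding 160 user
records, a hash index is built for the page only once more than
160 / 16 = 10 of the accesses to it could have used the hash index
(block->n_hash_helps) and at least 100 consecutive searches in the whole
index looked hashable (info->n_hash_potential); see
btr_search_update_block_hash_info() below. */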

/************************************************************************
Builds a hash index on a page with the given parameters. If the page already
has a hash index with different parameters, the old hash index is removed.
If index is non-NULL, this function checks if n_fields and n_bytes are
sensible values, and does not build a hash index if not. */
static
void
btr_search_build_page_hash_index(
/*=============================*/
        dict_index_t*   index,  /* in: index for which to build, or NULL if
                                not known */
        buf_block_t*    block,  /* in: index page, s- or x-latched */
        ulint           n_fields,/* in: hash this many full fields */
        ulint           n_bytes,/* in: hash this many bytes from the next
                                field */
        ibool           left_side);/* in: hash for searches from left side? */

/*********************************************************************
This function should be called before reserving any btr search mutex, if
the intended operation might add nodes to the search system hash table.
Because of the latching order, once we have reserved the btr search system
latch, we cannot allocate a free frame from the buffer pool. Checks that
there is a free buffer frame allocated for the hash table heap in the btr
search system. If not, allocates a free frame for the heap. This check makes
it probable that, when we have reserved the btr search system latch and we
need to allocate a new node to the hash table, it will succeed. However, the
check does not guarantee success. */
static
void
btr_search_check_free_space_in_heap(void)
/*=====================================*/
{
        hash_table_t*   table;
        mem_heap_t*     heap;

#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

        table = btr_search_sys->hash_index;

        heap = table->heap;

        /* Note that we peek the value of heap->free_block without reserving
        the latch: this is ok, because we will not guarantee that there will
        be enough free space in the hash table. */

        if (heap->free_block == NULL) {
                buf_block_t*    block = buf_block_alloc(0);

                rw_lock_x_lock(&btr_search_latch);

                if (heap->free_block == NULL) {
                        heap->free_block = block;
                } else {
                        buf_block_free(block);
                }

                rw_lock_x_unlock(&btr_search_latch);
        }
}
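
/* Note on the pattern above: the free frame is allocated from the buffer
pool before btr_search_latch is taken, because the latching order forbids
allocating one afterwards; heap->free_block is then re-checked under the
x-latch, and a frame that a concurrent thread installed in the meantime is
simply returned with buf_block_free(). */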

/*********************************************************************
Creates and initializes the adaptive search system at a database start. */
UNIV_INTERN
void
btr_search_sys_create(
/*==================*/
        ulint   hash_size)      /* in: hash index hash table size */
{
        /* We allocate the search latch from dynamic memory:
        see above at the global variable definition */

        btr_search_latch_temp = mem_alloc(sizeof(rw_lock_t));

        rw_lock_create(&btr_search_latch, SYNC_SEARCH_SYS);

        btr_search_sys = mem_alloc(sizeof(btr_search_sys_t));

        btr_search_sys->hash_index = ha_create(hash_size, 0, 0);

}

/************************************************************************
Disable the adaptive hash search system and empty the index. */
UNIV_INTERN
void
btr_search_disable(void)
/*====================*/
{
        btr_search_disabled = TRUE;
        rw_lock_x_lock(&btr_search_latch);

        ha_clear(btr_search_sys->hash_index);

        rw_lock_x_unlock(&btr_search_latch);
}

/************************************************************************
Enable the adaptive hash search system. */
UNIV_INTERN
void
btr_search_enable(void)
/*====================*/
{
        btr_search_disabled = FALSE;
}

/*********************************************************************
Creates and initializes a search info struct. */
UNIV_INTERN
btr_search_t*
btr_search_info_create(
/*===================*/
                                /* out, own: search info struct */
        mem_heap_t*     heap)   /* in: heap where created */
{
        btr_search_t*   info;

        info = mem_heap_alloc(heap, sizeof(btr_search_t));

#ifdef UNIV_DEBUG
        info->magic_n = BTR_SEARCH_MAGIC_N;
#endif /* UNIV_DEBUG */

        info->root_guess = NULL;

        info->hash_analysis = 0;
        info->n_hash_potential = 0;

        info->last_hash_succ = FALSE;

#ifdef UNIV_SEARCH_PERF_STAT
        info->n_hash_succ = 0;
        info->n_hash_fail = 0;
        info->n_patt_succ = 0;
        info->n_searches = 0;
#endif /* UNIV_SEARCH_PERF_STAT */

        /* Set some sensible values */
        info->n_fields = 1;
        info->n_bytes = 0;

        info->left_side = TRUE;

        return(info);
}

/*************************************************************************
Updates the search info of an index about hash successes. NOTE that info
is NOT protected by any semaphore, to save CPU time! Do not assume its fields
are consistent. */
static
void
btr_search_info_update_hash(
/*========================*/
        btr_search_t*   info,   /* in/out: search info */
        btr_cur_t*      cursor) /* in: cursor which was just positioned */
{
        dict_index_t*   index;
        ulint           n_unique;
        int             cmp;

#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

        index = cursor->index;

        if (dict_index_is_ibuf(index)) {
                /* So many deletes are performed on an insert buffer tree
                that we do not consider a hash index useful on it: */

                return;
        }

        n_unique = dict_index_get_n_unique_in_tree(index);

        if (info->n_hash_potential == 0) {

                goto set_new_recomm;
        }

        /* Test if the search would have succeeded using the recommended
        hash prefix */

        if (info->n_fields >= n_unique && cursor->up_match >= n_unique) {
increment_potential:
                info->n_hash_potential++;

                return;
        }

        cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
                          cursor->low_match, cursor->low_bytes);

        if (info->left_side ? cmp <= 0 : cmp > 0) {

                goto set_new_recomm;
        }

        cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
                          cursor->up_match, cursor->up_bytes);

        if (info->left_side ? cmp <= 0 : cmp > 0) {

                goto increment_potential;
        }

set_new_recomm:
        /* We have to set a new recommendation; skip the hash analysis
        for a while to avoid unnecessary CPU time usage when there is no
        chance for success */

        info->hash_analysis = 0;

        cmp = ut_pair_cmp(cursor->up_match, cursor->up_bytes,
                          cursor->low_match, cursor->low_bytes);
        if (cmp == 0) {
                info->n_hash_potential = 0;

                /* For extra safety, we set some sensible values here */

                info->n_fields = 1;
                info->n_bytes = 0;

                info->left_side = TRUE;

        } else if (cmp > 0) {
                info->n_hash_potential = 1;

                if (cursor->up_match >= n_unique) {

                        info->n_fields = n_unique;
                        info->n_bytes = 0;

                } else if (cursor->low_match < cursor->up_match) {

                        info->n_fields = cursor->low_match + 1;
                        info->n_bytes = 0;
                } else {
                        info->n_fields = cursor->low_match;
                        info->n_bytes = cursor->low_bytes + 1;
                }

                info->left_side = TRUE;
        } else {
                info->n_hash_potential = 1;

                if (cursor->low_match >= n_unique) {

                        info->n_fields = n_unique;
                        info->n_bytes = 0;

                } else if (cursor->low_match > cursor->up_match) {

                        info->n_fields = cursor->up_match + 1;
                        info->n_bytes = 0;
                } else {
                        info->n_fields = cursor->up_match;
                        info->n_bytes = cursor->up_bytes + 1;
                }

                info->left_side = FALSE;
        }
}
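
/* Example of the recommendation logic above: if a search tuple matched
low_match = 2 fields of the record on its left and up_match = 4 fields of
the record on its right (with n_unique larger than both), then cmp > 0 and
the recommendation becomes n_fields = low_match + 1 = 3 complete fields,
n_bytes = 0: hashing 3 fields distinguishes the tuple from everything to
its left, so left_side is set to TRUE. */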

/*************************************************************************
Updates the block search info on hash successes. NOTE that info and
block->n_hash_helps, n_fields, n_bytes, side are NOT protected by any
semaphore, to save CPU time! Do not assume the fields are consistent. */
static
ibool
btr_search_update_block_hash_info(
/*==============================*/
                                /* out: TRUE if building a (new) hash index on
                                the block is recommended */
        btr_search_t*   info,   /* in: search info */
        buf_block_t*    block,  /* in: buffer block */
        btr_cur_t*      cursor __attribute__((unused)))
                                /* in: cursor */
{
#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
        ut_ad(rw_lock_own(&block->lock, RW_LOCK_SHARED)
              || rw_lock_own(&block->lock, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
        ut_ad(cursor);

        info->last_hash_succ = FALSE;

        ut_a(buf_block_state_valid(block));
        ut_ad(info->magic_n == BTR_SEARCH_MAGIC_N);

        if ((block->n_hash_helps > 0)
            && (info->n_hash_potential > 0)
            && (block->n_fields == info->n_fields)
            && (block->n_bytes == info->n_bytes)
            && (block->left_side == info->left_side)) {

                if ((block->is_hashed)
                    && (block->curr_n_fields == info->n_fields)
                    && (block->curr_n_bytes == info->n_bytes)
                    && (block->curr_left_side == info->left_side)) {

                        /* The search would presumably have succeeded using
                        the hash index */

                        info->last_hash_succ = TRUE;
                }

                block->n_hash_helps++;
        } else {
                block->n_hash_helps = 1;
                block->n_fields = info->n_fields;
                block->n_bytes = info->n_bytes;
                block->left_side = info->left_side;
        }

#ifdef UNIV_DEBUG
        if (cursor->index->table->does_not_fit_in_memory) {
                block->n_hash_helps = 0;
        }
#endif /* UNIV_DEBUG */

        if ((block->n_hash_helps > page_get_n_recs(block->frame)
             / BTR_SEARCH_PAGE_BUILD_LIMIT)
            && (info->n_hash_potential >= BTR_SEARCH_BUILD_LIMIT)) {

                if ((!block->is_hashed)
                    || (block->n_hash_helps
                        > 2 * page_get_n_recs(block->frame))
                    || (block->n_fields != block->curr_n_fields)
                    || (block->n_bytes != block->curr_n_bytes)
                    || (block->left_side != block->curr_left_side)) {

                        /* Build a new hash index on the page */

                        return(TRUE);
                }
        }

        return(FALSE);
}

/*************************************************************************
Updates a hash node reference when it has been unsuccessfully used in a
search which could have succeeded with the used hash parameters. This can
happen because when building a hash index for a page, we do not check
what happens at page boundaries, and therefore there can be misleading
hash nodes. Also, collisions in the fold value can lead to misleading
references. This function lazily fixes these imperfections in the hash
index. */
static
void
btr_search_update_hash_ref(
/*=======================*/
        btr_search_t*   info,   /* in: search info */
        buf_block_t*    block,  /* in: buffer block where cursor positioned */
        btr_cur_t*      cursor) /* in: cursor */
{
        ulint   fold;
        rec_t*  rec;
        dulint  index_id;

        ut_ad(cursor->flag == BTR_CUR_HASH_FAIL);
#ifdef UNIV_SYNC_DEBUG
        ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
              || rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
        ut_ad(page_align(btr_cur_get_rec(cursor))
              == buf_block_get_frame(block));

        if (!block->is_hashed) {

                return;
        }

        ut_a(block->index == cursor->index);
        ut_a(!dict_index_is_ibuf(cursor->index));

        if ((info->n_hash_potential > 0)
            && (block->curr_n_fields == info->n_fields)
            && (block->curr_n_bytes == info->n_bytes)
            && (block->curr_left_side == info->left_side)) {
                mem_heap_t*     heap            = NULL;
                ulint           offsets_[REC_OFFS_NORMAL_SIZE];
                rec_offs_init(offsets_);

                rec = btr_cur_get_rec(cursor);

                if (!page_rec_is_user_rec(rec)) {

                        return;
                }

                index_id = cursor->index->id;
                fold = rec_fold(rec,
                                rec_get_offsets(rec, cursor->index, offsets_,
                                                ULINT_UNDEFINED, &heap),
                                block->curr_n_fields,
                                block->curr_n_bytes, index_id);
                if (UNIV_LIKELY_NULL(heap)) {
                        mem_heap_free(heap);
                }
#ifdef UNIV_SYNC_DEBUG
                ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

                ha_insert_for_fold(btr_search_sys->hash_index, fold,
                                   block, rec);
        }
}

/*************************************************************************
Updates the search info. */
UNIV_INTERN
void
btr_search_info_update_slow(
/*========================*/
        btr_search_t*   info,   /* in/out: search info */
        btr_cur_t*      cursor) /* in: cursor which was just positioned */
{
        buf_block_t*    block;
        ibool           build_index;
        ulint*          params;
        ulint*          params2;

#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

        block = btr_cur_get_block(cursor);

        /* NOTE that the following two function calls do NOT protect
        info or block->n_fields etc. with any semaphore, to save CPU time!
        We cannot assume the fields are consistent when we return from
        those functions! */

        btr_search_info_update_hash(info, cursor);

        build_index = btr_search_update_block_hash_info(info, block, cursor);

        if (build_index || (cursor->flag == BTR_CUR_HASH_FAIL)) {

                btr_search_check_free_space_in_heap();
        }

        if (cursor->flag == BTR_CUR_HASH_FAIL) {
                /* Update the hash node reference, if appropriate */

#ifdef UNIV_SEARCH_PERF_STAT
                btr_search_n_hash_fail++;
#endif /* UNIV_SEARCH_PERF_STAT */

                rw_lock_x_lock(&btr_search_latch);

                btr_search_update_hash_ref(info, block, cursor);

                rw_lock_x_unlock(&btr_search_latch);
        }

        if (build_index) {
                /* Note that since we did not protect block->n_fields etc.
                with any semaphore, the values can be inconsistent. We have
                to check inside the function call that they make sense. We
                also malloc an array and store the values there to make sure
                the compiler does not let the function call parameters change
                inside the called function. It might be that the compiler
                would optimize the call just to pass pointers to block. */

                params = mem_alloc(3 * sizeof(ulint));
                params[0] = block->n_fields;
                params[1] = block->n_bytes;
                params[2] = block->left_side;

                /* Make sure the compiler cannot deduce the values and do
                optimizations */

                params2 = params + btr_search_this_is_zero;

                btr_search_build_page_hash_index(cursor->index,
                                                 block,
                                                 params2[0],
                                                 params2[1],
                                                 params2[2]);
                mem_free(params);
        }
}

/**********************************************************************
Checks if a guessed position for a tree cursor is right. Note that if
mode is PAGE_CUR_LE, which is used in inserts, and the function returns
TRUE, then cursor->up_match and cursor->low_match both have sensible values. */
static
ibool
btr_search_check_guess(
/*===================*/
                                /* out: TRUE if success */
        btr_cur_t*      cursor, /* in: guessed cursor position */
        ibool           can_only_compare_to_cursor_rec,
                                /* in: if we do not have a latch on the page
                                of cursor, but only a latch on
                                btr_search_latch, then ONLY the columns
                                of the record UNDER the cursor are
                                protected, not the next or previous record
                                in the chain: we cannot look at the next or
                                previous record to check our guess! */
        const dtuple_t* tuple,  /* in: data tuple */
        ulint           mode,   /* in: PAGE_CUR_L, PAGE_CUR_LE, PAGE_CUR_G,
                                or PAGE_CUR_GE */
        mtr_t*          mtr)    /* in: mtr */
{
        rec_t*          rec;
        ulint           n_unique;
        ulint           match;
        ulint           bytes;
        int             cmp;
        mem_heap_t*     heap            = NULL;
        ulint           offsets_[REC_OFFS_NORMAL_SIZE];
        ulint*          offsets         = offsets_;
        ibool           success         = FALSE;
        rec_offs_init(offsets_);

        n_unique = dict_index_get_n_unique_in_tree(cursor->index);

        rec = btr_cur_get_rec(cursor);

        ut_ad(page_rec_is_user_rec(rec));

        match = 0;
        bytes = 0;

        offsets = rec_get_offsets(rec, cursor->index, offsets,
                                  n_unique, &heap);
        cmp = page_cmp_dtuple_rec_with_match(tuple, rec,
                                             offsets, &match, &bytes);

        if (mode == PAGE_CUR_GE) {
                if (cmp == 1) {
                        goto exit_func;
                }

                cursor->up_match = match;

                if (match >= n_unique) {
                        success = TRUE;
                        goto exit_func;
                }
        } else if (mode == PAGE_CUR_LE) {
                if (cmp == -1) {
                        goto exit_func;
                }

                cursor->low_match = match;

        } else if (mode == PAGE_CUR_G) {
                if (cmp != -1) {
                        goto exit_func;
                }
        } else if (mode == PAGE_CUR_L) {
                if (cmp != 1) {
                        goto exit_func;
                }
        }

        if (can_only_compare_to_cursor_rec) {
                /* Since we could not determine if our guess is right just by
                looking at the record under the cursor, return FALSE */
                goto exit_func;
        }

        match = 0;
        bytes = 0;

        if ((mode == PAGE_CUR_G) || (mode == PAGE_CUR_GE)) {
                rec_t*  prev_rec;

                ut_ad(!page_rec_is_infimum(rec));

                prev_rec = page_rec_get_prev(rec);

                if (page_rec_is_infimum(prev_rec)) {
                        success = btr_page_get_prev(page_align(prev_rec), mtr)
                                == FIL_NULL;

                        goto exit_func;
                }

                offsets = rec_get_offsets(prev_rec, cursor->index, offsets,
                                          n_unique, &heap);
                cmp = page_cmp_dtuple_rec_with_match(tuple, prev_rec,
                                                     offsets, &match, &bytes);
                if (mode == PAGE_CUR_GE) {
                        success = cmp == 1;
                } else {
                        success = cmp != -1;
                }

                goto exit_func;
        } else {
                rec_t*  next_rec;

                ut_ad(!page_rec_is_supremum(rec));

                next_rec = page_rec_get_next(rec);

                if (page_rec_is_supremum(next_rec)) {
                        if (btr_page_get_next(page_align(next_rec), mtr)
                            == FIL_NULL) {

                                cursor->up_match = 0;
                                success = TRUE;
                        }

                        goto exit_func;
                }

                offsets = rec_get_offsets(next_rec, cursor->index, offsets,
                                          n_unique, &heap);
                cmp = page_cmp_dtuple_rec_with_match(tuple, next_rec,
                                                     offsets, &match, &bytes);
                if (mode == PAGE_CUR_LE) {
                        success = cmp == -1;
                        cursor->up_match = match;
                } else {
                        success = cmp != 1;
                }
        }
exit_func:
        if (UNIV_LIKELY_NULL(heap)) {
                mem_heap_free(heap);
        }
        return(success);
}
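
/* To summarize the boundary checks above for PAGE_CUR_GE: the guess is
accepted only if the tuple compares <= the record under the cursor and
strictly > the preceding record; when the preceding record is the page
infimum, the guess can only be accepted if there is no previous page
(btr_page_get_prev() == FIL_NULL). The other search modes are symmetric. */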

/**********************************************************************
Tries to guess the right search position based on the hash search info
of the index. Note that if mode is PAGE_CUR_LE, which is used in inserts,
and the function returns TRUE, then cursor->up_match and cursor->low_match
both have sensible values. */
UNIV_INTERN
ibool
btr_search_guess_on_hash(
/*=====================*/
                                        /* out: TRUE if succeeded */
        dict_index_t*   index,          /* in: index */
        btr_search_t*   info,           /* in: index search info */
        const dtuple_t* tuple,          /* in: logical record */
        ulint           mode,           /* in: PAGE_CUR_L, ... */
        ulint           latch_mode,     /* in: BTR_SEARCH_LEAF, ...;
                                        NOTE that only if has_search_latch
                                        is 0, we will have a latch set on
                                        the cursor page, otherwise we assume
                                        the caller uses his search latch
                                        to protect the record! */
        btr_cur_t*      cursor,         /* out: tree cursor */
        ulint           has_search_latch,/* in: latch mode the caller
                                        currently has on btr_search_latch:
                                        RW_S_LATCH, RW_X_LATCH, or 0 */
        mtr_t*          mtr)            /* in: mtr */
{
        buf_block_t*    block;
        rec_t*          rec;
        const page_t*   page;
        ulint           fold;
        dulint          index_id;
#ifdef notdefined
        btr_cur_t       cursor2;
        btr_pcur_t      pcur;
#endif
        ut_ad(index && info && tuple && cursor && mtr);
        ut_ad((latch_mode == BTR_SEARCH_LEAF)
              || (latch_mode == BTR_MODIFY_LEAF));

        /* Note that, for efficiency, the struct info may not be protected by
        any latch here! */

        if (UNIV_UNLIKELY(info->n_hash_potential == 0)) {

                return(FALSE);
        }

        cursor->n_fields = info->n_fields;
        cursor->n_bytes = info->n_bytes;

        if (UNIV_UNLIKELY(dtuple_get_n_fields(tuple)
                          < cursor->n_fields + (cursor->n_bytes > 0))) {

                return(FALSE);
        }

        index_id = index->id;

#ifdef UNIV_SEARCH_PERF_STAT
        info->n_hash_succ++;
#endif
        fold = dtuple_fold(tuple, cursor->n_fields, cursor->n_bytes, index_id);

        cursor->fold = fold;
        cursor->flag = BTR_CUR_HASH;

        if (UNIV_LIKELY(!has_search_latch)) {
                rw_lock_s_lock(&btr_search_latch);
        }

        ut_ad(btr_search_latch.writer != RW_LOCK_EX);
        ut_ad(btr_search_latch.reader_count > 0);

        rec = ha_search_and_get_data(btr_search_sys->hash_index, fold);

        if (UNIV_UNLIKELY(!rec)) {
                goto failure_unlock;
        }

        page = page_align(rec);
        {
                ulint   page_no         = page_get_page_no(page);
                ulint   space_id        = page_get_space_id(page);

                buf_pool_mutex_enter();
                block = (buf_block_t*) buf_page_hash_get(space_id, page_no);
                buf_pool_mutex_exit();
        }

        if (UNIV_UNLIKELY(!block)
            || UNIV_UNLIKELY(buf_block_get_state(block)
                             != BUF_BLOCK_FILE_PAGE)) {

                /* The block is most probably being freed.
                The function buf_LRU_search_and_free_block()
                first removes the block from buf_pool->page_hash
                by calling buf_LRU_block_remove_hashed_page().
                After that, it invokes btr_search_drop_page_hash_index().
                Let us pretend that the block was also removed from
                the adaptive hash index. */
                goto failure_unlock;
        }

        if (UNIV_LIKELY(!has_search_latch)) {

                if (UNIV_UNLIKELY(
                            !buf_page_get_known_nowait(latch_mode, block,
                                                       BUF_MAKE_YOUNG,
                                                       __FILE__, __LINE__,
                                                       mtr))) {
                        goto failure_unlock;
                }

                rw_lock_s_unlock(&btr_search_latch);

#ifdef UNIV_SYNC_DEBUG
                buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
#endif /* UNIV_SYNC_DEBUG */
        }

        if (UNIV_UNLIKELY(buf_block_get_state(block)
                          == BUF_BLOCK_REMOVE_HASH)) {
                if (UNIV_LIKELY(!has_search_latch)) {

                        btr_leaf_page_release(block, latch_mode, mtr);
                }

                goto failure;
        }

        ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
        ut_ad(page_rec_is_user_rec(rec));

        btr_cur_position(index, rec, block, cursor);

        /* Check the validity of the guess within the page */

        /* If we only have the latch on btr_search_latch, not on the
        page, it only protects the columns of the record the cursor
        is positioned on. We cannot look at the next or the previous
        record to determine if our guess for the cursor position is
        right. */
        if (UNIV_EXPECT(
                    ut_dulint_cmp(index_id, btr_page_get_index_id(page)), 0)
            || !btr_search_check_guess(cursor,
                                       has_search_latch,
                                       tuple, mode, mtr)) {
                if (UNIV_LIKELY(!has_search_latch)) {
                        btr_leaf_page_release(block, latch_mode, mtr);
                }

                goto failure;
        }

        if (UNIV_LIKELY(info->n_hash_potential < BTR_SEARCH_BUILD_LIMIT + 5)) {

                info->n_hash_potential++;
        }

#ifdef notdefined
        /* These lines of code can be used in a debug version to check
        the correctness of the searched cursor position: */

        info->last_hash_succ = FALSE;

        /* Currently, does not work if the following fails: */
        ut_ad(!has_search_latch);

        btr_leaf_page_release(block, latch_mode, mtr);

        btr_cur_search_to_nth_level(index, 0, tuple, mode, latch_mode,
                                    &cursor2, 0, mtr);
        if (mode == PAGE_CUR_GE
            && page_rec_is_supremum(btr_cur_get_rec(&cursor2))) {

                /* If mode is PAGE_CUR_GE, then the binary search
                in the index tree may actually take us to the supremum
                of the previous page */

                info->last_hash_succ = FALSE;

                btr_pcur_open_on_user_rec(index, tuple, mode, latch_mode,
                                          &pcur, mtr);
                ut_ad(btr_pcur_get_rec(&pcur) == btr_cur_get_rec(cursor));
        } else {
                ut_ad(btr_cur_get_rec(&cursor2) == btr_cur_get_rec(cursor));
        }

        /* NOTE that it is theoretically possible that the above assertions
        fail if the page of the cursor gets removed from the buffer pool
        meanwhile! Thus it might not be a bug. */
#endif
        info->last_hash_succ = TRUE;

#ifdef UNIV_SEARCH_PERF_STAT
        btr_search_n_succ++;
#endif
        if (UNIV_LIKELY(!has_search_latch)
            && buf_page_peek_if_too_old(&block->page)) {

                buf_page_make_young(&block->page);
        }

        /* Increment the page get statistics though we did not really
        fix the page: for user info only */

        buf_pool->n_page_gets++;

        return(TRUE);

        /*-------------------------------------------*/
failure_unlock:
        if (UNIV_LIKELY(!has_search_latch)) {
                rw_lock_s_unlock(&btr_search_latch);
        }
failure:
        cursor->flag = BTR_CUR_HASH_FAIL;

#ifdef UNIV_SEARCH_PERF_STAT
        info->n_hash_fail++;

        if (info->n_hash_succ > 0) {
                info->n_hash_succ--;
        }
#endif
        info->last_hash_succ = FALSE;

        return(FALSE);
}
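
/* Note on the fold invariant used above: dtuple_fold() on the search tuple
can only find a hash node if it equals the rec_fold() value that
btr_search_build_page_hash_index() stored for the same prefix parameters;
this is why cursor->n_fields and cursor->n_bytes are copied from info
before folding. */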

/************************************************************************
Drops a page hash index. */
UNIV_INTERN
void
btr_search_drop_page_hash_index(
/*============================*/
        buf_block_t*    block)  /* in: block containing index page,
                                s- or x-latched, or an index page
                                for which we know that
                                block->buf_fix_count == 0 */
{
        hash_table_t*   table;
        ulint           n_fields;
        ulint           n_bytes;
        page_t*         page;
        rec_t*          rec;
        ulint           fold;
        ulint           prev_fold;
        dulint          index_id;
        ulint           n_cached;
        ulint           n_recs;
        ulint*          folds;
        ulint           i;
        mem_heap_t*     heap;
        dict_index_t*   index;
        ulint*          offsets;

#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

retry:
        rw_lock_s_lock(&btr_search_latch);
        page = block->frame;

        if (UNIV_LIKELY(!block->is_hashed)) {

                rw_lock_s_unlock(&btr_search_latch);

                return;
        }

        table = btr_search_sys->hash_index;

#ifdef UNIV_SYNC_DEBUG
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
              || rw_lock_own(&(block->lock), RW_LOCK_EX)
              || (block->page.buf_fix_count == 0));
#endif /* UNIV_SYNC_DEBUG */

        n_fields = block->curr_n_fields;
        n_bytes = block->curr_n_bytes;
        index = block->index;
        ut_a(!dict_index_is_ibuf(index));

        /* NOTE: The fields of block must not be accessed after
        releasing btr_search_latch, as the index page might only
        be s-latched! */

        rw_lock_s_unlock(&btr_search_latch);

        ut_a(n_fields + n_bytes > 0);

        n_recs = page_get_n_recs(page);

        /* Calculate and cache fold values into an array for fast deletion
        from the hash index */

        folds = mem_alloc(n_recs * sizeof(ulint));

        n_cached = 0;

        rec = page_get_infimum_rec(page);
        rec = page_rec_get_next(rec);

        index_id = btr_page_get_index_id(page);

        ut_a(0 == ut_dulint_cmp(index_id, index->id));

        prev_fold = 0;

        heap = NULL;
        offsets = NULL;

        while (!page_rec_is_supremum(rec)) {
                offsets = rec_get_offsets(rec, index, offsets,
                                          n_fields + (n_bytes > 0), &heap);
                ut_a(rec_offs_n_fields(offsets) == n_fields + (n_bytes > 0));
                fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);

                if (fold == prev_fold && prev_fold != 0) {

                        goto next_rec;
                }

                /* Remove all hash nodes pointing to this page from the
                hash chain */

                folds[n_cached] = fold;
                n_cached++;
next_rec:
                rec = page_rec_get_next(rec);
                prev_fold = fold;
        }

        if (UNIV_LIKELY_NULL(heap)) {
                mem_heap_free(heap);
        }

        rw_lock_x_lock(&btr_search_latch);

        if (UNIV_UNLIKELY(!block->is_hashed)) {
                /* Someone else has meanwhile dropped the hash index */

                goto cleanup;
        }

        ut_a(block->index == index);

        if (UNIV_UNLIKELY(block->curr_n_fields != n_fields)
            || UNIV_UNLIKELY(block->curr_n_bytes != n_bytes)) {

                /* Someone else has meanwhile built a new hash index on the
                page, with different parameters */

                rw_lock_x_unlock(&btr_search_latch);

                mem_free(folds);
                goto retry;
        }

        for (i = 0; i < n_cached; i++) {

                ha_remove_all_nodes_to_page(table, folds[i], page);
        }

        block->is_hashed = FALSE;
        block->index = NULL;
cleanup:
#ifdef UNIV_DEBUG
        if (UNIV_UNLIKELY(block->n_pointers)) {
                /* Corruption */
                ut_print_timestamp(stderr);
                fprintf(stderr,
                        "  InnoDB: Corruption of adaptive hash index."
                        " After dropping\n"
                        "InnoDB: the hash index to a page of %s,"
                        " still %lu hash nodes remain.\n",
                        index->name, (ulong) block->n_pointers);
                rw_lock_x_unlock(&btr_search_latch);

                btr_search_validate();
        } else {
                rw_lock_x_unlock(&btr_search_latch);
        }
#else /* UNIV_DEBUG */
        rw_lock_x_unlock(&btr_search_latch);
#endif /* UNIV_DEBUG */

        mem_free(folds);
}
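
/* The retry protocol above exists because the fold values are computed
while holding only the s-latch: if another thread rebuilds the hash index
for the page with different n_fields/n_bytes before the x-latch is
acquired, the cached folds no longer match the stored nodes, so the folds
array is discarded and the whole scan is redone. */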

/************************************************************************
Drops a page hash index when a page is freed from a fseg to the file system.
Drops possible hash index if the page happens to be in the buffer pool. */
UNIV_INTERN
void
btr_search_drop_page_hash_when_freed(
/*=================================*/
        ulint   space,          /* in: space id */
        ulint   zip_size,       /* in: compressed page size in bytes
                                or 0 for uncompressed pages */
        ulint   page_no)        /* in: page number */
{
        buf_block_t*    block;
        mtr_t           mtr;

        if (!buf_page_peek_if_search_hashed(space, page_no)) {

                return;
        }

        mtr_start(&mtr);

        /* We assume that if the caller has a latch on the page, then the
        caller has already dropped the hash index for the page, and we never
        get here. Therefore we can acquire the s-latch to the page without
        having to fear a deadlock. */

        block = buf_page_get_gen(space, zip_size, page_no, RW_S_LATCH, NULL,
                                BUF_GET_IF_IN_POOL, __FILE__, __LINE__,
                                &mtr);

#ifdef UNIV_SYNC_DEBUG
        buf_block_dbg_add_level(block, SYNC_TREE_NODE_FROM_HASH);
#endif /* UNIV_SYNC_DEBUG */

        btr_search_drop_page_hash_index(block);

        mtr_commit(&mtr);
}

/************************************************************************
Builds a hash index on a page with the given parameters. If the page already
has a hash index with different parameters, the old hash index is removed.
If index is non-NULL, this function checks if n_fields and n_bytes are
sensible values, and does not build a hash index if not. */
static
void
btr_search_build_page_hash_index(
/*=============================*/
        dict_index_t*   index,  /* in: index for which to build */
        buf_block_t*    block,  /* in: index page, s- or x-latched */
        ulint           n_fields,/* in: hash this many full fields */
        ulint           n_bytes,/* in: hash this many bytes from the next
                                field */
        ibool           left_side)/* in: hash for searches from left side? */
{
        hash_table_t*   table;
        page_t*         page;
        rec_t*          rec;
        rec_t*          next_rec;
        ulint           fold;
        ulint           next_fold;
        dulint          index_id;
        ulint           n_cached;
        ulint           n_recs;
        ulint*          folds;
        rec_t**         recs;
        ulint           i;
        mem_heap_t*     heap            = NULL;
        ulint           offsets_[REC_OFFS_NORMAL_SIZE];
        ulint*          offsets         = offsets_;
        rec_offs_init(offsets_);

        ut_ad(index);
        ut_a(!dict_index_is_ibuf(index));

        table = btr_search_sys->hash_index;
        page = buf_block_get_frame(block);

#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
              || rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

        rw_lock_s_lock(&btr_search_latch);

        if (block->is_hashed && ((block->curr_n_fields != n_fields)
                                 || (block->curr_n_bytes != n_bytes)
                                 || (block->curr_left_side != left_side))) {

                rw_lock_s_unlock(&btr_search_latch);

                btr_search_drop_page_hash_index(block);
        } else {
                rw_lock_s_unlock(&btr_search_latch);
        }

        n_recs = page_get_n_recs(page);

        if (n_recs == 0) {

                return;
        }

        /* Check that the values for hash index build are sensible */

        if (n_fields + n_bytes == 0) {

                return;
        }

        if (dict_index_get_n_unique_in_tree(index) < n_fields
            || (dict_index_get_n_unique_in_tree(index) == n_fields
                && n_bytes > 0)) {
                return;
        }

        /* Calculate and cache fold values and corresponding records into
        an array for fast insertion to the hash index */

        folds = mem_alloc(n_recs * sizeof(ulint));
        recs = mem_alloc(n_recs * sizeof(rec_t*));

        n_cached = 0;

        index_id = btr_page_get_index_id(page);

        rec = page_rec_get_next(page_get_infimum_rec(page));

        offsets = rec_get_offsets(rec, index, offsets,
                                  n_fields + (n_bytes > 0), &heap);

        if (!page_rec_is_supremum(rec)) {
                ut_a(n_fields <= rec_offs_n_fields(offsets));

                if (n_bytes > 0) {
                        ut_a(n_fields < rec_offs_n_fields(offsets));
                }
        }

        fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);

        if (left_side) {

                folds[n_cached] = fold;
                recs[n_cached] = rec;
                n_cached++;
        }

        for (;;) {
                next_rec = page_rec_get_next(rec);

                if (page_rec_is_supremum(next_rec)) {

                        if (!left_side) {

                                folds[n_cached] = fold;
                                recs[n_cached] = rec;
                                n_cached++;
                        }

                        break;
                }

                offsets = rec_get_offsets(next_rec, index, offsets,
                                          n_fields + (n_bytes > 0), &heap);
                next_fold = rec_fold(next_rec, offsets, n_fields,
                                     n_bytes, index_id);

                if (fold != next_fold) {
                        /* Insert an entry into the hash index */

                        if (left_side) {

                                folds[n_cached] = next_fold;
                                recs[n_cached] = next_rec;
                                n_cached++;
                        } else {
                                folds[n_cached] = fold;
                                recs[n_cached] = rec;
                                n_cached++;
                        }
                }

                rec = next_rec;
                fold = next_fold;
        }

        btr_search_check_free_space_in_heap();

        rw_lock_x_lock(&btr_search_latch);

        if (block->is_hashed && ((block->curr_n_fields != n_fields)
                                 || (block->curr_n_bytes != n_bytes)
                                 || (block->curr_left_side != left_side))) {
                goto exit_func;
        }

        block->is_hashed = TRUE;
        block->n_hash_helps = 0;

        block->curr_n_fields = n_fields;
        block->curr_n_bytes = n_bytes;
        block->curr_left_side = left_side;
        block->index = index;

        for (i = 0; i < n_cached; i++) {

                ha_insert_for_fold(table, folds[i], block, recs[i]);
        }

exit_func:
        rw_lock_x_unlock(&btr_search_latch);

        mem_free(folds);
        mem_free(recs);
        if (UNIV_LIKELY_NULL(heap)) {
                mem_heap_free(heap);
        }
}
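
/* In the build loop above, only one record of each run of equal fold
values is inserted into the hash index: the first record of the run when
left_side == TRUE, and the last one otherwise, i.e. the record that a
search coming from the corresponding side would position on. */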

/************************************************************************
Moves or deletes hash entries for moved records. If new_page is already hashed,
then the hash index for page, if any, is dropped. If new_page is not hashed,
and page is hashed, then a new hash index is built on new_page with the same
parameters as page (this often happens when a page is split). */
UNIV_INTERN
void
btr_search_move_or_delete_hash_entries(
/*===================================*/
        buf_block_t*    new_block,      /* in: records are copied
                                        to this page */
        buf_block_t*    block,          /* in: index page from which
                                        records were copied, and the
                                        copied records will be deleted
                                        from this page */
        dict_index_t*   index)          /* in: record descriptor */
{
        ulint   n_fields;
        ulint   n_bytes;
        ibool   left_side;

#ifdef UNIV_SYNC_DEBUG
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
        ut_ad(rw_lock_own(&(new_block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
        ut_a(!new_block->is_hashed || new_block->index == index);
        ut_a(!block->is_hashed || block->index == index);
        ut_a(!(new_block->is_hashed || block->is_hashed)
             || !dict_index_is_ibuf(index));

        rw_lock_s_lock(&btr_search_latch);

        if (new_block->is_hashed) {

                rw_lock_s_unlock(&btr_search_latch);

                btr_search_drop_page_hash_index(block);

                return;
        }

        if (block->is_hashed) {

                n_fields = block->curr_n_fields;
                n_bytes = block->curr_n_bytes;
                left_side = block->curr_left_side;

                new_block->n_fields = block->curr_n_fields;
                new_block->n_bytes = block->curr_n_bytes;
                new_block->left_side = left_side;

                rw_lock_s_unlock(&btr_search_latch);

                ut_a(n_fields + n_bytes > 0);

                btr_search_build_page_hash_index(index, new_block, n_fields,
                                                 n_bytes, left_side);
                ut_ad(n_fields == block->curr_n_fields);
                ut_ad(n_bytes == block->curr_n_bytes);
                ut_ad(left_side == block->curr_left_side);
                return;
        }

        rw_lock_s_unlock(&btr_search_latch);
}

/************************************************************************
Updates the page hash index when a single record is deleted from a page. */
UNIV_INTERN
void
btr_search_update_hash_on_delete(
/*=============================*/
        btr_cur_t*      cursor) /* in: cursor which was positioned on the
                                record to delete using btr_cur_search_...,
                                the record is not yet deleted */
{
        hash_table_t*   table;
        buf_block_t*    block;
        rec_t*          rec;
        ulint           fold;
        dulint          index_id;
        ibool           found;
        ulint           offsets_[REC_OFFS_NORMAL_SIZE];
        mem_heap_t*     heap            = NULL;
        rec_offs_init(offsets_);

        rec = btr_cur_get_rec(cursor);

        block = btr_cur_get_block(cursor);

#ifdef UNIV_SYNC_DEBUG
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

        if (!block->is_hashed) {

                return;
        }

        ut_a(block->index == cursor->index);
        ut_a(block->curr_n_fields + block->curr_n_bytes > 0);
        ut_a(!dict_index_is_ibuf(cursor->index));

        table = btr_search_sys->hash_index;

        index_id = cursor->index->id;
        fold = rec_fold(rec, rec_get_offsets(rec, cursor->index, offsets_,
                                             ULINT_UNDEFINED, &heap),
                        block->curr_n_fields, block->curr_n_bytes, index_id);
        if (UNIV_LIKELY_NULL(heap)) {
                mem_heap_free(heap);
        }
        rw_lock_x_lock(&btr_search_latch);

        found = ha_search_and_delete_if_found(table, fold, rec);

        rw_lock_x_unlock(&btr_search_latch);
}
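
/* Ordering note (illustrative): the hash entry must be removed while the
record still exists on the page, because rec_fold() above re-reads the
record bytes to recompute the fold under which the entry was stored.
A hypothetical sketch of a caller in the delete path, with the hash
update strictly before the physical delete:

        btr_search_update_hash_on_delete(cursor);
        page_cur_delete_rec(btr_cur_get_page_cur(cursor),
                            cursor->index, offsets, mtr);

Reversing the order would leave a dangling node->data pointer in the
adaptive hash table. */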

/************************************************************************
Updates the page hash index when a single record is inserted on a page.
If the cursor was positioned by a hash search using the page's current
hash parameters, the existing hash node is updated in place to point to
the inserted record; otherwise the general routine
btr_search_update_hash_on_insert() is invoked. */
UNIV_INTERN
void
btr_search_update_hash_node_on_insert(
/*==================================*/
        btr_cur_t*      cursor) /* in: cursor which was positioned to the
                                place to insert using btr_cur_search_...,
                                and the new record has been inserted next
                                to the cursor */
{
        hash_table_t*   table;
        buf_block_t*    block;
        rec_t*          rec;

        rec = btr_cur_get_rec(cursor);

        block = btr_cur_get_block(cursor);

#ifdef UNIV_SYNC_DEBUG
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

        if (!block->is_hashed) {

                return;
        }

        ut_a(block->index == cursor->index);
        ut_a(!dict_index_is_ibuf(cursor->index));

        rw_lock_x_lock(&btr_search_latch);

        if ((cursor->flag == BTR_CUR_HASH)
            && (cursor->n_fields == block->curr_n_fields)
            && (cursor->n_bytes == block->curr_n_bytes)
            && !block->curr_left_side) {

                table = btr_search_sys->hash_index;

                ha_search_and_update_if_found(table, cursor->fold, rec,
                                              block, page_rec_get_next(rec));

                rw_lock_x_unlock(&btr_search_latch);
        } else {
                rw_lock_x_unlock(&btr_search_latch);

                btr_search_update_hash_on_insert(cursor);
        }
}
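
/* Caller-side sketch (illustrative, simplified): the in-place node update
is only worthwhile when the preceding search actually went through the
hash index (cursor->flag == BTR_CUR_HASH) and the page's hash parameters
have not changed since. A caller would branch roughly as below, where
leaf and reorg are hypothetical flags of the insert path:

        if (leaf && !reorg && (cursor->flag == BTR_CUR_HASH)) {
                btr_search_update_hash_node_on_insert(cursor);
        } else {
                btr_search_update_hash_on_insert(cursor);
        }
*/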
1490
 
 
1491
 
/************************************************************************
1492
 
Updates the page hash index when a single record is inserted on a page. */
1493
 
UNIV_INTERN
1494
 
void
1495
 
btr_search_update_hash_on_insert(
1496
 
/*=============================*/
1497
 
        btr_cur_t*      cursor) /* in: cursor which was positioned to the
1498
 
                                place to insert using btr_cur_search_...,
1499
 
                                and the new record has been inserted next
1500
 
                                to the cursor */
1501
 
{
1502
 
        hash_table_t*   table;
1503
 
        buf_block_t*    block;
1504
 
        rec_t*          rec;
1505
 
        rec_t*          ins_rec;
1506
 
        rec_t*          next_rec;
1507
 
        dulint          index_id;
1508
 
        ulint           fold;
1509
 
        ulint           ins_fold;
1510
 
        ulint           next_fold = 0; /* remove warning (??? bug ???) */
1511
 
        ulint           n_fields;
1512
 
        ulint           n_bytes;
1513
 
        ibool           left_side;
1514
 
        ibool           locked          = FALSE;
1515
 
        mem_heap_t*     heap            = NULL;
1516
 
        ulint           offsets_[REC_OFFS_NORMAL_SIZE];
1517
 
        ulint*          offsets         = offsets_;
1518
 
        rec_offs_init(offsets_);
1519
 
 
1520
 
        table = btr_search_sys->hash_index;
1521
 
 
1522
 
        btr_search_check_free_space_in_heap();
1523
 
 
1524
 
        rec = btr_cur_get_rec(cursor);
1525
 
 
1526
 
        block = btr_cur_get_block(cursor);
1527
 
 
1528
 
#ifdef UNIV_SYNC_DEBUG
1529
 
        ut_ad(rw_lock_own(&(block->lock), RW_LOCK_EX));
1530
 
#endif /* UNIV_SYNC_DEBUG */
1531
 
 
1532
 
        if (!block->is_hashed) {
1533
 
 
1534
 
                return;
1535
 
        }
1536
 
 
1537
 
        ut_a(block->index == cursor->index);
1538
 
        ut_a(!dict_index_is_ibuf(cursor->index));
1539
 
 
1540
 
        index_id = cursor->index->id;
1541
 
 
1542
 
        n_fields = block->curr_n_fields;
1543
 
        n_bytes = block->curr_n_bytes;
1544
 
        left_side = block->curr_left_side;
1545
 
 
1546
 
        ins_rec = page_rec_get_next(rec);
1547
 
        next_rec = page_rec_get_next(ins_rec);
1548
 
 
1549
 
        offsets = rec_get_offsets(ins_rec, cursor->index, offsets,
1550
 
                                  ULINT_UNDEFINED, &heap);
1551
 
        ins_fold = rec_fold(ins_rec, offsets, n_fields, n_bytes, index_id);
1552
 
 
1553
 
        if (!page_rec_is_supremum(next_rec)) {
1554
 
                offsets = rec_get_offsets(next_rec, cursor->index, offsets,
1555
 
                                          n_fields + (n_bytes > 0), &heap);
1556
 
                next_fold = rec_fold(next_rec, offsets, n_fields,
1557
 
                                     n_bytes, index_id);
1558
 
        }
1559
 
 
1560
 
        if (!page_rec_is_infimum(rec)) {
1561
 
                offsets = rec_get_offsets(rec, cursor->index, offsets,
1562
 
                                          n_fields + (n_bytes > 0), &heap);
1563
 
                fold = rec_fold(rec, offsets, n_fields, n_bytes, index_id);
1564
 
        } else {
1565
 
                if (left_side) {
1566
 
 
1567
 
                        rw_lock_x_lock(&btr_search_latch);
1568
 
 
1569
 
                        locked = TRUE;
1570
 
 
1571
 
                        ha_insert_for_fold(table, ins_fold, block, ins_rec);
1572
 
                }
1573
 
 
1574
 
                goto check_next_rec;
1575
 
        }
1576
 
 
1577
 
        if (fold != ins_fold) {
1578
 
 
1579
 
                if (!locked) {
1580
 
 
1581
 
                        rw_lock_x_lock(&btr_search_latch);
1582
 
 
1583
 
                        locked = TRUE;
1584
 
                }
1585
 
 
1586
 
                if (!left_side) {
1587
 
                        ha_insert_for_fold(table, fold, block, rec);
1588
 
                } else {
1589
 
                        ha_insert_for_fold(table, ins_fold, block, ins_rec);
1590
 
                }
1591
 
        }
1592
 
 
1593
 
check_next_rec:
1594
 
        if (page_rec_is_supremum(next_rec)) {
1595
 
 
1596
 
                if (!left_side) {
1597
 
 
1598
 
                        if (!locked) {
1599
 
                                rw_lock_x_lock(&btr_search_latch);
1600
 
 
1601
 
                                locked = TRUE;
1602
 
                        }
1603
 
 
1604
 
                        ha_insert_for_fold(table, ins_fold, block, ins_rec);
1605
 
                }
1606
 
 
1607
 
                goto function_exit;
1608
 
        }
1609
 
 
1610
 
        if (ins_fold != next_fold) {
1611
 
 
1612
 
                if (!locked) {
1613
 
 
1614
 
                        rw_lock_x_lock(&btr_search_latch);
1615
 
 
1616
 
                        locked = TRUE;
1617
 
                }
1618
 
 
1619
 
                if (!left_side) {
1620
 
 
1621
 
                        ha_insert_for_fold(table, ins_fold, block, ins_rec);
1622
 
                        /*
1623
 
                        fputs("Hash insert for ", stderr);
1624
 
                        dict_index_name_print(stderr, cursor->index);
1625
 
                        fprintf(stderr, " fold %lu\n", ins_fold);
1626
 
                        */
1627
 
                } else {
1628
 
                        ha_insert_for_fold(table, next_fold, block, next_rec);
1629
 
                }
1630
 
        }
1631
 
 
1632
 
function_exit:
1633
 
        if (UNIV_LIKELY_NULL(heap)) {
1634
 
                mem_heap_free(heap);
1635
 
        }
1636
 
        if (locked) {
1637
 
                rw_lock_x_unlock(&btr_search_latch);
1638
 
        }
1639
 
}
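
/* Sketch of the cases handled above (illustrative). Records on a hashed
page are grouped by fold value, and the hash index keeps one entry per
group, pointing to the group's left or right end depending on
curr_left_side:

        folds on page:   ... F F F | G G | H H H ...
                         one hash entry per group end

Inserting ins_rec can therefore require at most two ha_insert_for_fold()
calls: one if ins_rec opens a new group against its predecessor
(fold != ins_fold), and one if it closes a group against its successor
(ins_fold != next_fold). btr_search_latch is taken lazily, only once the
first modification is actually needed. */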

/************************************************************************
Validates the search system. */
UNIV_INTERN
ibool
btr_search_validate(void)
/*=====================*/
                                /* out: TRUE if ok */
{
        page_t*         page;
        ha_node_t*      node;
        ulint           n_page_dumps    = 0;
        ibool           ok              = TRUE;
        ulint           i;
        ulint           cell_count;
        mem_heap_t*     heap            = NULL;
        ulint           offsets_[REC_OFFS_NORMAL_SIZE];
        ulint*          offsets         = offsets_;

        /* How many cells to check before temporarily releasing
        btr_search_latch. */
        ulint           chunk_size = 10000;

        rec_offs_init(offsets_);

        rw_lock_x_lock(&btr_search_latch);
        buf_pool_mutex_enter();

        cell_count = hash_get_n_cells(btr_search_sys->hash_index);

        for (i = 0; i < cell_count; i++) {
                /* We release btr_search_latch every once in a while to
                give other queries a chance to run. */
                if ((i != 0) && ((i % chunk_size) == 0)) {
                        buf_pool_mutex_exit();
                        rw_lock_x_unlock(&btr_search_latch);
                        os_thread_yield();
                        rw_lock_x_lock(&btr_search_latch);
                        buf_pool_mutex_enter();
                }

                node = hash_get_nth_cell(btr_search_sys->hash_index, i)->node;

                for (; node != NULL; node = node->next) {
                        const buf_block_t*      block;

                        page = page_align(node->data);
                        {
                                ulint   page_no = page_get_page_no(page);
                                ulint   space_id = page_get_space_id(page);

                                block = buf_block_hash_get(space_id, page_no);
                        }

                        if (UNIV_UNLIKELY(!block)) {

                                /* The block is most probably being freed.
                                The function buf_LRU_search_and_free_block()
                                first removes the block from
                                buf_pool->page_hash by calling
                                buf_LRU_block_remove_hashed_page().
                                After that, it invokes
                                btr_search_drop_page_hash_index().
                                Let us pretend that the block was also removed
                                from the adaptive hash index. */
                                continue;
                        }

                        ut_a(!dict_index_is_ibuf(block->index));

                        offsets = rec_get_offsets((const rec_t*) node->data,
                                                  block->index, offsets,
                                                  block->curr_n_fields
                                                  + (block->curr_n_bytes > 0),
                                                  &heap);

                        if (!block->is_hashed || node->fold
                            != rec_fold((rec_t*)(node->data),
                                        offsets,
                                        block->curr_n_fields,
                                        block->curr_n_bytes,
                                        btr_page_get_index_id(page))) {
                                ok = FALSE;
                                ut_print_timestamp(stderr);

                                fprintf(stderr,
                                        "  InnoDB: Error in an adaptive hash"
                                        " index pointer to page %lu\n"
                                        "InnoDB: ptr mem address %p"
                                        " index id %lu %lu,"
                                        " node fold %lu, rec fold %lu\n",
                                        (ulong) page_get_page_no(page),
                                        node->data,
                                        (ulong) ut_dulint_get_high(
                                                btr_page_get_index_id(page)),
                                        (ulong) ut_dulint_get_low(
                                                btr_page_get_index_id(page)),
                                        (ulong) node->fold,
                                        (ulong) rec_fold((rec_t*)(node->data),
                                                         offsets,
                                                         block->curr_n_fields,
                                                         block->curr_n_bytes,
                                                         btr_page_get_index_id(
                                                                 page)));

                                fputs("InnoDB: Record ", stderr);
                                rec_print_new(stderr, (rec_t*)node->data,
                                              offsets);
                                fprintf(stderr, "\nInnoDB: on that page."
                                        " Page mem address %p, is hashed %lu,"
                                        " n fields %lu, n bytes %lu\n"
                                        "InnoDB: side %lu\n",
                                        (void*) page, (ulong) block->is_hashed,
                                        (ulong) block->curr_n_fields,
                                        (ulong) block->curr_n_bytes,
                                        (ulong) block->curr_left_side);

                                if (n_page_dumps < 20) {
                                        buf_page_print(page, 0);
                                        n_page_dumps++;
                                }
                        }
                }
        }

        for (i = 0; i < cell_count; i += chunk_size) {
                ulint   end_index = ut_min(i + chunk_size - 1, cell_count - 1);

                /* We release btr_search_latch every once in a while to
                give other queries a chance to run. */
                if (i != 0) {
                        buf_pool_mutex_exit();
                        rw_lock_x_unlock(&btr_search_latch);
                        os_thread_yield();
                        rw_lock_x_lock(&btr_search_latch);
                        buf_pool_mutex_enter();
                }

                if (!ha_validate(btr_search_sys->hash_index, i, end_index)) {
                        ok = FALSE;
                }
        }

        buf_pool_mutex_exit();
        rw_lock_x_unlock(&btr_search_latch);
        if (UNIV_LIKELY_NULL(heap)) {
                mem_heap_free(heap);
        }

        return(ok);
}
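
/* Usage sketch (illustrative): the validator is meant for debug-style
consistency checking, e.g. wrapped in an assertion:

        ut_a(btr_search_validate());

Note that it x-latches btr_search_latch and holds the buffer pool mutex,
yielding only between chunks of chunk_size cells, so invoking it on a
busy server briefly stalls all adaptive hash searches. */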