~drizzle-trunk/drizzle/development


Viewing changes to plugin/innobase/buf/buf0lru.c

  • Committer: Brian Aker
  • Date: 2010-10-09 17:44:13 UTC
  • Merged to the branch mainline in revision 1853.
  • Revision ID: brian@tangent.org-20101009174413-4cs0q58kw0fjd45y
First pass through adding back user_locks.

@@ -1,6 +1,6 @@
 /*****************************************************************************
 
-Copyright (C) 1995, 2010, Innobase Oy. All Rights Reserved.
+Copyright (c) 1995, 2009, Innobase Oy. All Rights Reserved.
 
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -23,7 +23,6 @@
 Created 11/5/1995 Heikki Tuuri
 *******************************************************/
 
-#include <config.h>
 #include "buf0lru.h"
 
 #ifdef UNIV_NONINL
@@ -50,22 +49,18 @@
 #include "log0recv.h"
 #include "srv0srv.h"
 
-/** The number of blocks from the LRU_old pointer onward, including
-the block pointed to, must be buf_pool->LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV
-of the whole LRU list length, except that the tolerance defined below
-is allowed. Note that the tolerance must be small enough such that for
-even the BUF_LRU_OLD_MIN_LEN long LRU list, the LRU_old pointer is not
-allowed to point to either end of the LRU list. */
+/** The number of blocks from the LRU_old pointer onward, including the block
+pointed to, must be 3/8 of the whole LRU list length, except that the
+tolerance defined below is allowed. Note that the tolerance must be small
+enough such that for even the BUF_LRU_OLD_MIN_LEN long LRU list, the
+LRU_old pointer is not allowed to point to either end of the LRU list. */
 
 #define BUF_LRU_OLD_TOLERANCE   20
 
-/** The minimum amount of non-old blocks when the LRU_old list exists
-(that is, when there are more than BUF_LRU_OLD_MIN_LEN blocks).
-@see buf_LRU_old_adjust_len */
-#define BUF_LRU_NON_OLD_MIN_LEN 5
-#if BUF_LRU_NON_OLD_MIN_LEN >= BUF_LRU_OLD_MIN_LEN
-# error "BUF_LRU_NON_OLD_MIN_LEN >= BUF_LRU_OLD_MIN_LEN"
-#endif
+/** The whole LRU list length is divided by this number to determine an
+initial segment in buf_LRU_get_recent_limit */
+
+#define BUF_LRU_INITIAL_RATIO   8
 
 /** When dropping the search hash index entries before deleting an ibd
 file, we build a local array of pages belonging to that tablespace
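Note: the hunk above replaces the tunable old-sublist ratio (buf_pool->LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV) with the fixed 3/8 split of the older InnoDB code. The following standalone C sketch is not InnoDB code, and the ratio and divisor values in it are illustrative assumptions; it only shows that with a default-like ratio the two formulas pick essentially the same target length:

#include <stdio.h>

int main(void)
{
        unsigned long len = 320;  /* LRU list length, in blocks */

        /* Removed side: tunable ratio; 3/8 expressed in 1024ths here
           (384/1024) purely for illustration. */
        unsigned long ratio = 384, ratio_div = 1024;
        printf("tunable target: %lu\n", len * ratio / ratio_div); /* 120 */

        /* Added side: fixed 3/8 of the list length. */
        printf("fixed target:   %lu\n", 3 * (len / 8));           /* 120 */
        return 0;
}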
@@ -97,9 +92,8 @@
 #define BUF_LRU_IO_TO_UNZIP_FACTOR 50
 
 /** Sampled values buf_LRU_stat_cur.
-Not protected by any mutex.  Updated by buf_LRU_stat_update(). */
+Protected by buf_pool_mutex.  Updated by buf_LRU_stat_update(). */
 static buf_LRU_stat_t           buf_LRU_stat_arr[BUF_LRU_STAT_N_INTERVAL];
-
 /** Cursor to buf_LRU_stat_arr[] that is updated in a round-robin fashion. */
 static ulint                    buf_LRU_stat_arr_ind;
 
@@ -108,21 +102,15 @@
 UNIV_INTERN buf_LRU_stat_t      buf_LRU_stat_cur;
 
 /** Running sum of past values of buf_LRU_stat_cur.
-Updated by buf_LRU_stat_update().  Not Protected by any mutex. */
+Updated by buf_LRU_stat_update().  Protected by buf_pool_mutex. */
 UNIV_INTERN buf_LRU_stat_t      buf_LRU_stat_sum;
 
 /* @} */
 
-/** @name Heuristics for detecting index scan @{ */
-/** Move blocks to "new" LRU list only if the first access was at
-least this many milliseconds ago.  Not protected by any mutex or latch. */
-UNIV_INTERN uint        buf_LRU_old_threshold_ms;
-/* @} */
-
 /******************************************************************//**
 Takes a block out of the LRU list and page hash table.
 If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
-the object will be freed and buf_pool->zip_mutex will be released.
+the object will be freed and buf_pool_zip_mutex will be released.
 
 If a compressed page or a compressed-only block descriptor is freed,
 other compressed pages or compressed-only block descriptors may be
@@ -153,14 +141,13 @@
 @return TRUE if should use unzip_LRU */
 UNIV_INLINE
 ibool
-buf_LRU_evict_from_unzip_LRU(
-/*=========================*/
-        buf_pool_t*     buf_pool)
+buf_LRU_evict_from_unzip_LRU(void)
+/*==============================*/
 {
         ulint   io_avg;
         ulint   unzip_avg;
 
-        ut_ad(buf_pool_mutex_own(buf_pool));
+        ut_ad(buf_pool_mutex_own());
 
         /* If the unzip_LRU list is empty, we can only use the LRU. */
         if (UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0) {
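Note: the hunk cuts off before the body of buf_LRU_evict_from_unzip_LRU(). A standalone sketch of the heuristic it implements, assuming the upstream comparison direction (unzip_avg weighed against io_avg scaled by the BUF_LRU_IO_TO_UNZIP_FACTOR definition shown earlier); this is a model, not the function's actual body:

#include <stdio.h>

#define BUF_LRU_IO_TO_UNZIP_FACTOR 50

int main(void)
{
        unsigned long io_avg = 10;     /* averaged page I/O operations */
        unsigned long unzip_avg = 100; /* averaged page decompressions */

        if (unzip_avg <= io_avg * BUF_LRU_IO_TO_UNZIP_FACTOR)
                printf("I/O bound: evict uncompressed frames from unzip_LRU\n");
        else
                printf("CPU bound: treat unzip_LRU as empty, use the LRU\n");
        return 0;
}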
@@ -228,8 +215,7 @@
 void
 buf_LRU_drop_page_hash_for_tablespace(
 /*==================================*/
-        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
-        ulint           id)             /*!< in: space id */
+        ulint   id)     /*!< in: space id */
 {
         buf_page_t*     bpage;
         ulint*          page_arr;
@@ -244,10 +230,9 @@
                 return;
         }
 
-        page_arr = static_cast<unsigned long *>(ut_malloc(
-                sizeof(ulint) * BUF_LRU_DROP_SEARCH_HASH_SIZE));
-
-        buf_pool_mutex_enter(buf_pool);
+        page_arr = ut_malloc(sizeof(ulint)
+                             * BUF_LRU_DROP_SEARCH_HASH_SIZE);
+        buf_pool_mutex_enter();
 
 scan_again:
         num_entries = 0;
@@ -285,17 +270,14 @@
                         if (num_entries < BUF_LRU_DROP_SEARCH_HASH_SIZE) {
                                 goto next_page;
                         }
-
-                        /* Array full. We release the buf_pool->mutex to
+                        /* Array full. We release the buf_pool_mutex to
                         obey the latching order. */
-                        buf_pool_mutex_exit(buf_pool);
-
-                        buf_LRU_drop_page_hash_batch(
-                                id, zip_size, page_arr, num_entries);
-
+                        buf_pool_mutex_exit();
+
+                        buf_LRU_drop_page_hash_batch(id, zip_size, page_arr,
+                                                     num_entries);
                         num_entries = 0;
-
-                        buf_pool_mutex_enter(buf_pool);
+                        buf_pool_mutex_enter();
                 } else {
                         mutex_exit(block_mutex);
                 }
@@ -320,7 +302,7 @@
                 }
         }
 
-        buf_pool_mutex_exit(buf_pool);
+        buf_pool_mutex_exit();
 
         /* Drop any remaining batch of search hashed pages. */
         buf_LRU_drop_page_hash_batch(id, zip_size, page_arr, num_entries);
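Note: both sides of this hunk follow the same batching pattern: collect page numbers under the buffer-pool mutex, and when the array fills, release that mutex before calling the batch drop so the latching order is respected. A minimal standalone model of the pattern (pthread-based, not InnoDB code):

#include <pthread.h>
#include <stddef.h>

#define BATCH_SIZE 1024

static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long page_arr[BATCH_SIZE];

/* Stand-in for buf_LRU_drop_page_hash_batch(). */
static void drop_batch(const unsigned long *arr, size_t n)
{
        (void)arr; (void)n;
}

static void drop_for_tablespace(int pages_left) /* toy stand-in for the scan */
{
        size_t num_entries = 0;

        pthread_mutex_lock(&pool_mutex);
        while (pages_left-- > 0) {
                page_arr[num_entries++] = 0;  /* record a page number */
                if (num_entries < BATCH_SIZE)
                        continue;
                /* Array full: release the hot mutex before batch work. */
                pthread_mutex_unlock(&pool_mutex);
                drop_batch(page_arr, num_entries);
                num_entries = 0;
                pthread_mutex_lock(&pool_mutex);
        }
        pthread_mutex_unlock(&pool_mutex);
        drop_batch(page_arr, num_entries);    /* drop the remainder */
}

int main(void)
{
        drop_for_tablespace(5);
        return 0;
}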
@@ -328,52 +310,44 @@
 }
 
 /******************************************************************//**
-Invalidates all pages belonging to a given tablespace inside a specific
-buffer pool instance when we are deleting the data file(s) of that
-tablespace. */
-static
+Invalidates all pages belonging to a given tablespace when we are deleting
+the data file(s) of that tablespace. */
+UNIV_INTERN
 void
-buf_LRU_invalidate_tablespace_buf_pool_instance(
-/*============================================*/
-        buf_pool_t*     buf_pool,       /*!< buffer pool instance */
-        ulint           id)             /*!< in: space id */
+buf_LRU_invalidate_tablespace(
+/*==========================*/
+        ulint   id)     /*!< in: space id */
 {
         buf_page_t*     bpage;
         ibool           all_freed;
 
+        /* Before we attempt to drop pages one by one we first
+        attempt to drop page hash index entries in batches to make
+        it more efficient. The batching attempt is a best effort
+        attempt and does not guarantee that all pages hash entries
+        will be dropped. We get rid of remaining page hash entries
+        one by one below. */
+        buf_LRU_drop_page_hash_for_tablespace(id);
+
 scan_again:
-        buf_pool_mutex_enter(buf_pool);
+        buf_pool_mutex_enter();
 
         all_freed = TRUE;
 
         bpage = UT_LIST_GET_LAST(buf_pool->LRU);
 
         while (bpage != NULL) {
+                mutex_t*        block_mutex = buf_page_get_mutex(bpage);
                 buf_page_t*     prev_bpage;
-                ibool           prev_bpage_buf_fix = FALSE;
 
                 ut_a(buf_page_in_file(bpage));
 
+                mutex_enter(block_mutex);
                 prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
 
-                /* bpage->space and bpage->io_fix are protected by
-                buf_pool->mutex and block_mutex.  It is safe to check
-                them while holding buf_pool->mutex only. */
-
-                if (buf_page_get_space(bpage) != id) {
-                        /* Skip this block, as it does not belong to
-                        the space that is being invalidated. */
-                } else if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
-                        /* We cannot remove this page during this scan
-                        yet; maybe the system is currently reading it
-                        in, or flushing the modifications to the file */
-
-                        all_freed = FALSE;
-                } else {
-                        mutex_t* block_mutex = buf_page_get_mutex(bpage);
-                        mutex_enter(block_mutex);
-
-                        if (bpage->buf_fix_count > 0) {
+                if (buf_page_get_space(bpage) == id) {
+                        if (bpage->buf_fix_count > 0
+                            || buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
 
                                 /* We cannot remove this page during
                                 this scan yet; maybe the system is
@@ -393,44 +367,12 @@
                                         (ulong) buf_page_get_page_no(bpage));
                         }
 #endif
-                        if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {
-                                /* This is a compressed-only block
-                                descriptor.  Ensure that prev_bpage
-                                cannot be relocated when bpage is freed. */
-                                if (UNIV_LIKELY(prev_bpage != NULL)) {
-                                        switch (buf_page_get_state(
-                                                        prev_bpage)) {
-                                        case BUF_BLOCK_FILE_PAGE:
-                                                /* Descriptors of uncompressed
-                                                blocks will not be relocated,
-                                                because we are holding the
-                                                buf_pool->mutex. */
-                                                break;
-                                        case BUF_BLOCK_ZIP_PAGE:
-                                        case BUF_BLOCK_ZIP_DIRTY:
-                                                /* Descriptors of compressed-
-                                                only blocks can be relocated,
-                                                unless they are buffer-fixed.
-                                                Because both bpage and
-                                                prev_bpage are protected by
-                                                buf_pool_zip_mutex, it is
-                                                not necessary to acquire
-                                                further mutexes. */
-                                                ut_ad(&buf_pool->zip_mutex
-                                                      == block_mutex);
-                                                ut_ad(mutex_own(block_mutex));
-                                                prev_bpage_buf_fix = TRUE;
-                                                prev_bpage->buf_fix_count++;
-                                                break;
-                                        default:
-                                                ut_error;
-                                        }
-                                }
-                        } else if (((buf_block_t*) bpage)->is_hashed) {
+                        if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE
+                            && ((buf_block_t*) bpage)->is_hashed) {
                                 ulint   page_no;
                                 ulint   zip_size;
 
-                                buf_pool_mutex_exit(buf_pool);
+                                buf_pool_mutex_exit();
 
                                 zip_size = buf_page_get_zip_size(bpage);
                                 page_no = buf_page_get_page_no(bpage);
@@ -450,8 +392,7 @@
                                 buf_flush_remove(bpage);
                         }
 
-                        /* Remove from the LRU list. */
-
+                        /* Remove from the LRU list */
                         if (buf_LRU_block_remove_hashed_page(bpage, TRUE)
                             != BUF_BLOCK_ZIP_FREE) {
                                 buf_LRU_block_free_hashed_page((buf_block_t*)
@@ -460,34 +401,25 @@
                                 /* The block_mutex should have been
                                 released by buf_LRU_block_remove_hashed_page()
                                 when it returns BUF_BLOCK_ZIP_FREE. */
-                                ut_ad(block_mutex == &buf_pool->zip_mutex);
+                                ut_ad(block_mutex == &buf_pool_zip_mutex);
                                 ut_ad(!mutex_own(block_mutex));
 
-                                if (prev_bpage_buf_fix) {
-                                        /* We temporarily buffer-fixed
-                                        prev_bpage, so that
-                                        buf_buddy_free() could not
-                                        relocate it, in case it was a
-                                        compressed-only block
-                                        descriptor. */
-
-                                        mutex_enter(block_mutex);
-                                        ut_ad(prev_bpage->buf_fix_count > 0);
-                                        prev_bpage->buf_fix_count--;
-                                        mutex_exit(block_mutex);
-                                }
-
-                                goto next_page_no_mutex;
+                                /* The compressed block descriptor
+                                (bpage) has been deallocated and
+                                block_mutex released.  Also,
+                                buf_buddy_free() may have relocated
+                                prev_bpage.  Rescan the LRU list. */
+
+                                bpage = UT_LIST_GET_LAST(buf_pool->LRU);
+                                continue;
                         }
+                }
 next_page:
-                        mutex_exit(block_mutex);
-                }
-
-next_page_no_mutex:
+                mutex_exit(block_mutex);
                 bpage = prev_bpage;
         }
 
-        buf_pool_mutex_exit(buf_pool);
+        buf_pool_mutex_exit();
 
         if (!all_freed) {
                 os_thread_sleep(20000);
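Note: in both versions the invalidation is a retry loop: if any page was buffer-fixed or had I/O pending, all_freed stays FALSE, the thread sleeps 20000 microseconds, and the scan restarts from scan_again. A standalone model of that control flow (not InnoDB code):

#include <stdio.h>
#include <unistd.h>

/* Toy scan: pretend one page is busy on the first pass only. */
static int scan_and_free_all(void)
{
        static int busy_pages = 1;
        return busy_pages-- <= 0;
}

int main(void)
{
        while (!scan_and_free_all())
                usleep(20000);  /* os_thread_sleep(20000) sleeps 20 ms */
        puts("all pages of the tablespace invalidated");
        return 0;
}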
@@ -497,29 +429,39 @@
 }
 
 /******************************************************************//**
-Invalidates all pages belonging to a given tablespace when we are deleting
-the data file(s) of that tablespace. */
+Gets the minimum LRU_position field for the blocks in an initial segment
+(determined by BUF_LRU_INITIAL_RATIO) of the LRU list. The limit is not
+guaranteed to be precise, because the ulint_clock may wrap around.
+@return the limit; zero if could not determine it */
 UNIV_INTERN
-void
-buf_LRU_invalidate_tablespace(
+ulint
+buf_LRU_get_recent_limit(void)
 /*==========================*/
-        ulint   id)     /*!< in: space id */
 {
-        ulint   i;
-
-        /* Before we attempt to drop pages one by one we first
-        attempt to drop page hash index entries in batches to make
-        it more efficient. The batching attempt is a best effort
-        attempt and does not guarantee that all pages hash entries
-        will be dropped. We get rid of remaining page hash entries
-        one by one below. */
-        for (i = 0; i < srv_buf_pool_instances; i++) {
-                buf_pool_t*     buf_pool;
-
-                buf_pool = buf_pool_from_array(i);
-                buf_LRU_drop_page_hash_for_tablespace(buf_pool, id);
-                buf_LRU_invalidate_tablespace_buf_pool_instance(buf_pool, id);
+        const buf_page_t*       bpage;
+        ulint                   len;
+        ulint                   limit;
+
+        buf_pool_mutex_enter();
+
+        len = UT_LIST_GET_LEN(buf_pool->LRU);
+
+        if (len < BUF_LRU_OLD_MIN_LEN) {
+                /* The LRU list is too short to do read-ahead */
+
+                buf_pool_mutex_exit();
+
+                return(0);
         }
+
+        bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
+
+        limit = buf_page_get_LRU_position(bpage);
+        len /= BUF_LRU_INITIAL_RATIO;
+
+        buf_pool_mutex_exit();
+
+        return(limit > len ? (limit - len) : 0);
 }
 
 /********************************************************************//**
@@ -531,9 +473,8 @@
         buf_page_t*     bpage)  /*!< in: pointer to the block in question */
 {
         buf_page_t*     b;
-        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
 
-        ut_ad(buf_pool_mutex_own(buf_pool));
+        ut_ad(buf_pool_mutex_own());
         ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE);
 
         /* Find the first successor of bpage in the LRU list
@@ -563,19 +504,16 @@
 ibool
 buf_LRU_free_from_unzip_LRU_list(
 /*=============================*/
-        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
-        ulint           n_iterations)   /*!< in: how many times this has
-                                        been called repeatedly without
-                                        result: a high value means that
-                                        we should search farther; we will
-                                        search n_iterations / 5 of the
-                                        unzip_LRU list, or nothing if
-                                        n_iterations >= 5 */
+        ulint   n_iterations)   /*!< in: how many times this has been called
+                                repeatedly without result: a high value means
+                                that we should search farther; we will search
+                                n_iterations / 5 of the unzip_LRU list,
+                                or nothing if n_iterations >= 5 */
 {
         buf_block_t*    block;
         ulint           distance;
 
-        ut_ad(buf_pool_mutex_own(buf_pool));
+        ut_ad(buf_pool_mutex_own());
 
         /* Theoratically it should be much easier to find a victim
         from unzip_LRU as we can choose even a dirty block (as we'll
@@ -585,7 +523,7 @@
         if we have done five iterations so far. */
 
         if (UNIV_UNLIKELY(n_iterations >= 5)
-            || !buf_LRU_evict_from_unzip_LRU(buf_pool)) {
+            || !buf_LRU_evict_from_unzip_LRU()) {
 
                 return(FALSE);
         }
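Note: buf_LRU_get_recent_limit(), restored by this hunk, derives its cutoff from the head block's LRU_position minus one BUF_LRU_INITIAL_RATIO-th of the list length. A standalone worked example (not InnoDB code):

#include <stdio.h>

#define BUF_LRU_INITIAL_RATIO 8

int main(void)
{
        unsigned long limit = 10000; /* LRU_position of the list head */
        unsigned long len = 4000;    /* LRU list length, in blocks */

        len /= BUF_LRU_INITIAL_RATIO;                  /* segment: 500 */
        printf("recent limit: %lu\n",
               limit > len ? limit - len : 0);         /* 9500 */
        return 0;
}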
@@ -637,9 +575,7 @@
 ibool
 buf_LRU_free_from_common_LRU_list(
 /*==============================*/
-        buf_pool_t*     buf_pool,
-        ulint           n_iterations)
-                                /*!< in: how many times this has been called
+        ulint   n_iterations)   /*!< in: how many times this has been called
                                 repeatedly without result: a high value means
                                 that we should search farther; if
                                 n_iterations < 10, then we search
@@ -649,7 +585,7 @@
         buf_page_t*     bpage;
         ulint           distance;
 
-        ut_ad(buf_pool_mutex_own(buf_pool));
+        ut_ad(buf_pool_mutex_own());
 
         distance = 100 + (n_iterations * buf_pool->curr_size) / 10;
 
@@ -658,7 +594,6 @@
              bpage = UT_LIST_GET_PREV(LRU, bpage), distance--) {
 
                 enum buf_lru_free_block_status  freed;
-                unsigned                        accessed;
                 mutex_t*                        block_mutex
                         = buf_page_get_mutex(bpage);
 
@@ -666,18 +601,11 @@
                 ut_ad(bpage->in_LRU_list);
 
                 mutex_enter(block_mutex);
-                accessed = buf_page_is_accessed(bpage);
                 freed = buf_LRU_free_block(bpage, TRUE, NULL);
                 mutex_exit(block_mutex);
 
                 switch (freed) {
                 case BUF_LRU_FREED:
-                        /* Keep track of pages that are evicted without
-                        ever being accessed. This gives us a measure of
-                        the effectiveness of readahead */
-                        if (!accessed) {
-                                ++buf_pool->stat.n_ra_pages_evicted;
-                        }
                         return(TRUE);
 
                 case BUF_LRU_NOT_FREED:
@@ -706,10 +634,7 @@
 ibool
 buf_LRU_search_and_free_block(
 /*==========================*/
-        buf_pool_t*     buf_pool,
-                                /*!< in: buffer pool instance */
-        ulint           n_iterations)
-                                /*!< in: how many times this has been called
+        ulint   n_iterations)   /*!< in: how many times this has been called
                                 repeatedly without result: a high value means
                                 that we should search farther; if
                                 n_iterations < 10, then we search
@@ -720,13 +645,12 @@
 {
         ibool   freed = FALSE;
 
-        buf_pool_mutex_enter(buf_pool);
+        buf_pool_mutex_enter();
 
-        freed = buf_LRU_free_from_unzip_LRU_list(buf_pool, n_iterations);
+        freed = buf_LRU_free_from_unzip_LRU_list(n_iterations);
 
         if (!freed) {
-                freed = buf_LRU_free_from_common_LRU_list(
-                        buf_pool, n_iterations);
+                freed = buf_LRU_free_from_common_LRU_list(n_iterations);
         }
 
         if (!freed) {
@@ -735,7 +659,7 @@
                 buf_pool->LRU_flush_ended--;
         }
 
-        buf_pool_mutex_exit(buf_pool);
+        buf_pool_mutex_exit();
 
         return(freed);
 }
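Note: the search in buf_LRU_free_from_common_LRU_list() deepens with each failed call; the distance formula is visible in the context above. A standalone illustration of how far successive iterations scan (not InnoDB code):

#include <stdio.h>

int main(void)
{
        unsigned long curr_size = 65536; /* buffer pool size, in pages */
        unsigned long n;

        for (n = 0; n < 3; n++) {
                unsigned long distance = 100 + (n * curr_size) / 10;
                printf("iteration %lu: scan up to %lu blocks from the tail\n",
                       n, distance);
        }
        return 0;
}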
@@ -750,65 +674,45 @@
 wasted. */
 UNIV_INTERN
 void
-buf_LRU_try_free_flushed_blocks(
-/*============================*/
-        buf_pool_t*     buf_pool)               /*!< in: buffer pool instance */
+buf_LRU_try_free_flushed_blocks(void)
+/*=================================*/
 {
-
-        if (buf_pool == NULL) {
-                ulint   i;
-
-                for (i = 0; i < srv_buf_pool_instances; i++) {
-                        buf_pool = buf_pool_from_array(i);
-                        buf_LRU_try_free_flushed_blocks(buf_pool);
-                }
-        } else {
-                buf_pool_mutex_enter(buf_pool);
-
-                while (buf_pool->LRU_flush_ended > 0) {
-
-                        buf_pool_mutex_exit(buf_pool);
-
-                        buf_LRU_search_and_free_block(buf_pool, 1);
-
-                        buf_pool_mutex_enter(buf_pool);
-                }
-
-                buf_pool_mutex_exit(buf_pool);
+        buf_pool_mutex_enter();
+
+        while (buf_pool->LRU_flush_ended > 0) {
+
+                buf_pool_mutex_exit();
+
+                buf_LRU_search_and_free_block(1);
+
+                buf_pool_mutex_enter();
         }
+
+        buf_pool_mutex_exit();
 }
 
 /******************************************************************//**
-Returns TRUE if less than 25 % of the buffer pool in any instance is
-available. This can be used in heuristics to prevent huge transactions
-eating up the whole buffer pool for their locks.
+Returns TRUE if less than 25 % of the buffer pool is available. This can be
+used in heuristics to prevent huge transactions eating up the whole buffer
+pool for their locks.
 @return TRUE if less than 25 % of buffer pool left */
 UNIV_INTERN
 ibool
 buf_LRU_buf_pool_running_out(void)
 /*==============================*/
 {
-        ulint   i;
-        ibool   ret = FALSE;
-
-        for (i = 0; i < srv_buf_pool_instances && !ret; i++) {
-                buf_pool_t*     buf_pool;
-
-                buf_pool = buf_pool_from_array(i);
-
-                buf_pool_mutex_enter(buf_pool);
-
-                if (!recv_recovery_on
-                    && UT_LIST_GET_LEN(buf_pool->free)
-                       + UT_LIST_GET_LEN(buf_pool->LRU)
-                       < buf_pool->curr_size / 4) {
-
-                        ret = TRUE;
-                }
-
-                buf_pool_mutex_exit(buf_pool);
+        ibool   ret     = FALSE;
+
+        buf_pool_mutex_enter();
+
+        if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
+            + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 4) {
+
+                ret = TRUE;
         }
 
+        buf_pool_mutex_exit();
+
         return(ret);
 }
 
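Note: after this hunk, buf_LRU_buf_pool_running_out() checks a single global pool instead of looping over instances; the threshold itself is unchanged. A standalone model of the check (not InnoDB code):

#include <stdio.h>

int main(void)
{
        unsigned long curr_size = 8192; /* pool size, in pages */
        unsigned long free_len = 100;   /* length of the free list */
        unsigned long lru_len = 1800;   /* length of the LRU list */

        int running_out = free_len + lru_len < curr_size / 4;
        printf("running out: %s\n", running_out ? "yes" : "no"); /* yes */
        return 0;
}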
@@ -818,18 +722,16 @@
 @return a free control block, or NULL if the buf_block->free list is empty */
 UNIV_INTERN
 buf_block_t*
-buf_LRU_get_free_only(
-/*==================*/
-        buf_pool_t*     buf_pool)
+buf_LRU_get_free_only(void)
+/*=======================*/
 {
         buf_block_t*    block;
 
-        ut_ad(buf_pool_mutex_own(buf_pool));
+        ut_ad(buf_pool_mutex_own());
 
         block = (buf_block_t*) UT_LIST_GET_FIRST(buf_pool->free);
 
         if (block) {
-
                 ut_ad(block->page.in_free_list);
                 ut_d(block->page.in_free_list = FALSE);
                 ut_ad(!block->page.in_flush_list);
@@ -842,8 +744,6 @@
                 buf_block_set_state(block, BUF_BLOCK_READY_FOR_USE);
                 UNIV_MEM_ALLOC(block->frame, UNIV_PAGE_SIZE);
 
-                ut_ad(buf_pool_from_block(block) == buf_pool);
-
                 mutex_exit(&block->mutex);
         }
 
@@ -859,9 +759,8 @@
 buf_block_t*
 buf_LRU_get_free_block(
 /*===================*/
-        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
-        ulint           zip_size)       /*!< in: compressed page size in bytes,
-                                        or 0 if uncompressed tablespace */
+        ulint   zip_size)       /*!< in: compressed page size in bytes,
+                                or 0 if uncompressed tablespace */
 {
         buf_block_t*    block           = NULL;
         ibool           freed;
@@ -869,7 +768,7 @@
         ibool           mon_value_was   = FALSE;
         ibool           started_monitor = FALSE;
 loop:
-        buf_pool_mutex_enter(buf_pool);
+        buf_pool_mutex_enter();
 
         if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
             + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 20) {
@@ -936,11 +835,9 @@
         }
 
         /* If there is a block in the free list, take it */
-        block = buf_LRU_get_free_only(buf_pool);
+        block = buf_LRU_get_free_only();
         if (block) {
 
-                ut_ad(buf_pool_from_block(block) == buf_pool);
-
 #ifdef UNIV_DEBUG
                 block->page.zip.m_start =
 #endif /* UNIV_DEBUG */
@@ -951,17 +848,14 @@
                 if (UNIV_UNLIKELY(zip_size)) {
                         ibool   lru;
                         page_zip_set_size(&block->page.zip, zip_size);
-
-                        block->page.zip.data = static_cast<unsigned char *>(buf_buddy_alloc(
-                                buf_pool, zip_size, &lru));
-
+                        block->page.zip.data = buf_buddy_alloc(zip_size, &lru);
                         UNIV_MEM_DESC(block->page.zip.data, zip_size, block);
                 } else {
                         page_zip_set_size(&block->page.zip, 0);
                         block->page.zip.data = NULL;
                 }
 
-                buf_pool_mutex_exit(buf_pool);
+                buf_pool_mutex_exit();
 
                 if (started_monitor) {
                         srv_print_innodb_monitor = mon_value_was;
@@ -973,9 +867,9 @@
         /* If no block was in the free list, search from the end of the LRU
         list and try to free a block there */
 
-        buf_pool_mutex_exit(buf_pool);
+        buf_pool_mutex_exit();
 
-        freed = buf_LRU_search_and_free_block(buf_pool, n_iterations);
+        freed = buf_LRU_search_and_free_block(n_iterations);
 
         if (freed > 0) {
                 goto loop;
@@ -1017,23 +911,23 @@
 
         /* No free block was found: try to flush the LRU list */
 
-        buf_flush_free_margin(buf_pool);
+        buf_flush_free_margin();
         ++srv_buf_pool_wait_free;
 
         os_aio_simulated_wake_handler_threads();
 
-        buf_pool_mutex_enter(buf_pool);
+        buf_pool_mutex_enter();
 
         if (buf_pool->LRU_flush_ended > 0) {
                 /* We have written pages in an LRU flush. To make the insert
                 buffer more efficient, we try to move these pages to the free
                 list. */
 
-                buf_pool_mutex_exit(buf_pool);
+                buf_pool_mutex_exit();
 
-                buf_LRU_try_free_flushed_blocks(buf_pool);
+                buf_LRU_try_free_flushed_blocks();
         } else {
-                buf_pool_mutex_exit(buf_pool);
+                buf_pool_mutex_exit();
         }
 
         if (n_iterations > 10) {
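Note: buf_LRU_get_free_block() is a retry ladder: take a block from the free list if possible, otherwise free one from the LRU tail, otherwise start an LRU flush, wake the aio threads, and loop back, searching deeper each time. A standalone sketch of the control flow only (not InnoDB code; the succeed-on-third-pass condition is a toy stand-in):

#include <stdio.h>

int main(void)
{
        unsigned long n_iterations = 0;

        for (;;) {
                if (n_iterations == 2) {  /* toy: free list now has a block */
                        puts("took a block from the free list");
                        return 0;
                }
                puts("free list empty: search the LRU tail, then LRU-flush");
                if (++n_iterations > 10)
                        puts("still failing: wait for flushes to complete");
        }
}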
@@ -1051,19 +945,16 @@
 is inside the allowed limits. */
 UNIV_INLINE
 void
-buf_LRU_old_adjust_len(
-/*===================*/
-        buf_pool_t*     buf_pool)       /*!< in: buffer pool instance */
+buf_LRU_old_adjust_len(void)
+/*========================*/
 {
         ulint   old_len;
         ulint   new_len;
 
         ut_a(buf_pool->LRU_old);
-        ut_ad(buf_pool_mutex_own(buf_pool));
-        ut_ad(buf_pool->LRU_old_ratio >= BUF_LRU_OLD_RATIO_MIN);
-        ut_ad(buf_pool->LRU_old_ratio <= BUF_LRU_OLD_RATIO_MAX);
-#if BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5)
-# error "BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5)"
+        ut_ad(buf_pool_mutex_own());
+#if 3 * (BUF_LRU_OLD_MIN_LEN / 8) <= BUF_LRU_OLD_TOLERANCE + 5
+# error "3 * (BUF_LRU_OLD_MIN_LEN / 8) <= BUF_LRU_OLD_TOLERANCE + 5"
 #endif
 #ifdef UNIV_LRU_DEBUG
         /* buf_pool->LRU_old must be the first item in the LRU list
@@ -1075,39 +966,34 @@
              || UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)->old);
 #endif /* UNIV_LRU_DEBUG */
 
-        old_len = buf_pool->LRU_old_len;
-        new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU)
-                         * buf_pool->LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV,
-                         UT_LIST_GET_LEN(buf_pool->LRU)
-                         - (BUF_LRU_OLD_TOLERANCE
-                            + BUF_LRU_NON_OLD_MIN_LEN));
-
         for (;;) {
-                buf_page_t*     LRU_old = buf_pool->LRU_old;
+                old_len = buf_pool->LRU_old_len;
+                new_len = 3 * (UT_LIST_GET_LEN(buf_pool->LRU) / 8);
 
-                ut_a(LRU_old);
-                ut_ad(LRU_old->in_LRU_list);
+                ut_ad(buf_pool->LRU_old->in_LRU_list);
+                ut_a(buf_pool->LRU_old);
 #ifdef UNIV_LRU_DEBUG
-                ut_a(LRU_old->old);
+                ut_a(buf_pool->LRU_old->old);
 #endif /* UNIV_LRU_DEBUG */
 
                 /* Update the LRU_old pointer if necessary */
 
-                if (old_len + BUF_LRU_OLD_TOLERANCE < new_len) {
+                if (old_len < new_len - BUF_LRU_OLD_TOLERANCE) {
 
-                        buf_pool->LRU_old = LRU_old = UT_LIST_GET_PREV(
-                                LRU, LRU_old);
+                        buf_pool->LRU_old = UT_LIST_GET_PREV(
+                                LRU, buf_pool->LRU_old);
 #ifdef UNIV_LRU_DEBUG
-                        ut_a(!LRU_old->old);
+                        ut_a(!buf_pool->LRU_old->old);
 #endif /* UNIV_LRU_DEBUG */
-                        old_len = ++buf_pool->LRU_old_len;
-                        buf_page_set_old(LRU_old, TRUE);
+                        buf_page_set_old(buf_pool->LRU_old, TRUE);
+                        buf_pool->LRU_old_len++;
 
                 } else if (old_len > new_len + BUF_LRU_OLD_TOLERANCE) {
 
-                        buf_pool->LRU_old = UT_LIST_GET_NEXT(LRU, LRU_old);
-                        old_len = --buf_pool->LRU_old_len;
-                        buf_page_set_old(LRU_old, FALSE);
+                        buf_page_set_old(buf_pool->LRU_old, FALSE);
+                        buf_pool->LRU_old = UT_LIST_GET_NEXT(
+                                LRU, buf_pool->LRU_old);
+                        buf_pool->LRU_old_len--;
                 } else {
                         return;
                 }
@@ -1119,32 +1005,30 @@
 called when the LRU list grows to BUF_LRU_OLD_MIN_LEN length. */
 static
 void
-buf_LRU_old_init(
-/*=============*/
-        buf_pool_t*     buf_pool)
+buf_LRU_old_init(void)
+/*==================*/
 {
         buf_page_t*     bpage;
 
-        ut_ad(buf_pool_mutex_own(buf_pool));
+        ut_ad(buf_pool_mutex_own());
         ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN);
 
         /* We first initialize all blocks in the LRU list as old and then use
         the adjust function to move the LRU_old pointer to the right
         position */
 
-        for (bpage = UT_LIST_GET_LAST(buf_pool->LRU); bpage != NULL;
-             bpage = UT_LIST_GET_PREV(LRU, bpage)) {
+        bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
+
+        while (bpage != NULL) {
                 ut_ad(bpage->in_LRU_list);
-                ut_ad(buf_page_in_file(bpage));
-                /* This loop temporarily violates the
-                assertions of buf_page_set_old(). */
-                bpage->old = TRUE;
+                buf_page_set_old(bpage, TRUE);
+                bpage = UT_LIST_GET_NEXT(LRU, bpage);
         }
 
         buf_pool->LRU_old = UT_LIST_GET_FIRST(buf_pool->LRU);
         buf_pool->LRU_old_len = UT_LIST_GET_LEN(buf_pool->LRU);
 
-        buf_LRU_old_adjust_len(buf_pool);
+        buf_LRU_old_adjust_len();
 }
 
 /******************************************************************//**
@@ -1155,12 +1039,10 @@
 /*=================================*/
         buf_page_t*     bpage)  /*!< in/out: control block */
 {
-        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
-
         ut_ad(buf_pool);
         ut_ad(bpage);
         ut_ad(buf_page_in_file(bpage));
-        ut_ad(buf_pool_mutex_own(buf_pool));
+        ut_ad(buf_pool_mutex_own());
 
         if (buf_page_belongs_to_unzip_LRU(bpage)) {
                 buf_block_t*    block = (buf_block_t*) bpage;
@@ -1180,11 +1062,9 @@
 /*=================*/
         buf_page_t*     bpage)  /*!< in: control block */
 {
-        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
-
         ut_ad(buf_pool);
         ut_ad(bpage);
-        ut_ad(buf_pool_mutex_own(buf_pool));
+        ut_ad(buf_pool_mutex_own());
 
         ut_a(buf_page_in_file(bpage));
 
@@ -1195,19 +1075,16 @@
 
         if (UNIV_UNLIKELY(bpage == buf_pool->LRU_old)) {
 
-                /* Below: the previous block is guaranteed to exist,
-                because the LRU_old pointer is only allowed to differ
-                by BUF_LRU_OLD_TOLERANCE from strict
-                buf_pool->LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV of the LRU
-                list length. */
-                buf_page_t*     prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
+                /* Below: the previous block is guaranteed to exist, because
+                the LRU_old pointer is only allowed to differ by the
+                tolerance value from strict 3/8 of the LRU list length. */
 
-                ut_a(prev_bpage);
+                buf_pool->LRU_old = UT_LIST_GET_PREV(LRU, bpage);
+                ut_a(buf_pool->LRU_old);
 #ifdef UNIV_LRU_DEBUG
-                ut_a(!prev_bpage->old);
+                ut_a(!buf_pool->LRU_old->old);
 #endif /* UNIV_LRU_DEBUG */
-                buf_pool->LRU_old = prev_bpage;
-                buf_page_set_old(prev_bpage, TRUE);
+                buf_page_set_old(buf_pool->LRU_old, TRUE);
 
                 buf_pool->LRU_old_len++;
         }
@@ -1218,19 +1095,10 @@
 
         buf_unzip_LRU_remove_block_if_needed(bpage);
 
-        /* If the LRU list is so short that LRU_old is not defined,
-        clear the "old" flags and return */
+        /* If the LRU list is so short that LRU_old not defined, return */
         if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN) {
 
-                for (bpage = UT_LIST_GET_FIRST(buf_pool->LRU); bpage != NULL;
-                     bpage = UT_LIST_GET_NEXT(LRU, bpage)) {
-                        /* This loop temporarily violates the
-                        assertions of buf_page_set_old(). */
-                        bpage->old = FALSE;
-                }
-
                 buf_pool->LRU_old = NULL;
-                buf_pool->LRU_old_len = 0;
 
                 return;
         }
@@ -1244,7 +1112,7 @@
         }
 
         /* Adjust the length of the old block list if necessary */
-        buf_LRU_old_adjust_len(buf_pool);
+        buf_LRU_old_adjust_len();
 }
 
 /******************************************************************//**
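Note: buf_LRU_old_adjust_len() above only moves the LRU_old pointer once the old sublist drifts more than BUF_LRU_OLD_TOLERANCE blocks from the 3/8 target, which prevents the pointer from oscillating on every insertion. A standalone model of that hysteresis (not InnoDB code):

#include <stdio.h>

#define BUF_LRU_OLD_TOLERANCE 20

int main(void)
{
        unsigned long lru_len = 320;
        unsigned long new_len = 3 * (lru_len / 8); /* target: 120 */
        unsigned long old_len;

        for (old_len = 95; old_len <= 145; old_len += 25) {
                if (old_len < new_len - BUF_LRU_OLD_TOLERANCE)
                        printf("%3lu: grow the old sublist\n", old_len);
                else if (old_len > new_len + BUF_LRU_OLD_TOLERANCE)
                        printf("%3lu: shrink the old sublist\n", old_len);
                else
                        printf("%3lu: within tolerance, no move\n", old_len);
        }
        return 0;
}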
@@ -1257,11 +1125,9 @@
         ibool           old)    /*!< in: TRUE if should be put to the end
                                 of the list, else put to the start */
 {
-        buf_pool_t*     buf_pool = buf_pool_from_block(block);
-
         ut_ad(buf_pool);
         ut_ad(block);
-        ut_ad(buf_pool_mutex_own(buf_pool));
+        ut_ad(buf_pool_mutex_own());
 
         ut_a(buf_page_belongs_to_unzip_LRU(&block->page));
 
@@ -1283,36 +1149,47 @@
 /*=========================*/
         buf_page_t*     bpage)  /*!< in: control block */
 {
-        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
+        buf_page_t*     last_bpage;
 
         ut_ad(buf_pool);
         ut_ad(bpage);
-        ut_ad(buf_pool_mutex_own(buf_pool));
+        ut_ad(buf_pool_mutex_own());
 
         ut_a(buf_page_in_file(bpage));
 
+        last_bpage = UT_LIST_GET_LAST(buf_pool->LRU);
+
+        if (last_bpage) {
+                bpage->LRU_position = last_bpage->LRU_position;
+        } else {
+                bpage->LRU_position = buf_pool_clock_tic();
+        }
+
         ut_ad(!bpage->in_LRU_list);
         UT_LIST_ADD_LAST(LRU, buf_pool->LRU, bpage);
         ut_d(bpage->in_LRU_list = TRUE);
 
+        buf_page_set_old(bpage, TRUE);
+
+        if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) {
+
+                buf_pool->LRU_old_len++;
+        }
+
         if (UT_LIST_GET_LEN(buf_pool->LRU) > BUF_LRU_OLD_MIN_LEN) {
 
                 ut_ad(buf_pool->LRU_old);
 
                 /* Adjust the length of the old block list if necessary */
 
-                buf_page_set_old(bpage, TRUE);
-                buf_pool->LRU_old_len++;
-                buf_LRU_old_adjust_len(buf_pool);
+                buf_LRU_old_adjust_len();
 
         } else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) {
 
                 /* The LRU list is now long enough for LRU_old to become
                 defined: init it */
 
-                buf_LRU_old_init(buf_pool);
-        } else {
-                buf_page_set_old(bpage, buf_pool->LRU_old != NULL);
+                buf_LRU_old_init();
         }
 
         /* If this is a zipped block with decompressed frame as well
@@ -1334,11 +1211,9 @@
                                 LRU list is very short, the block is added to
                                 the start, regardless of this parameter */
 {
-        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
-
         ut_ad(buf_pool);
         ut_ad(bpage);
-        ut_ad(buf_pool_mutex_own(buf_pool));
+        ut_ad(buf_pool_mutex_own());
 
         ut_a(buf_page_in_file(bpage));
         ut_ad(!bpage->in_LRU_list);
@@ -1347,6 +1222,7 @@
 
                 UT_LIST_ADD_FIRST(LRU, buf_pool->LRU, bpage);
 
+                bpage->LRU_position = buf_pool_clock_tic();
                 bpage->freed_page_clock = buf_pool->freed_page_clock;
         } else {
 #ifdef UNIV_LRU_DEBUG
@@ -1361,27 +1237,31 @@
                 UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU, buf_pool->LRU_old,
                                      bpage);
                 buf_pool->LRU_old_len++;
+
+                /* We copy the LRU position field of the previous block
+                to the new block */
+
+                bpage->LRU_position = (buf_pool->LRU_old)->LRU_position;
         }
 
         ut_d(bpage->in_LRU_list = TRUE);
 
+        buf_page_set_old(bpage, old);
+
         if (UT_LIST_GET_LEN(buf_pool->LRU) > BUF_LRU_OLD_MIN_LEN) {
 
                 ut_ad(buf_pool->LRU_old);
 
                 /* Adjust the length of the old block list if necessary */
 
-                buf_page_set_old(bpage, old);
-                buf_LRU_old_adjust_len(buf_pool);
+                buf_LRU_old_adjust_len();
 
         } else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) {
 
                 /* The LRU list is now long enough for LRU_old to become
                 defined: init it */
 
-                buf_LRU_old_init(buf_pool);
-        } else {
-                buf_page_set_old(bpage, buf_pool->LRU_old != NULL);
+                buf_LRU_old_init();
         }
 
         /* If this is a zipped block with decompressed frame as well
@@ -1415,14 +1295,6 @@
 /*=====================*/
         buf_page_t*     bpage)  /*!< in: control block */
 {
-        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
-
-        ut_ad(buf_pool_mutex_own(buf_pool));
-
-        if (bpage->old) {
-                buf_pool->stat.n_pages_made_young++;
-        }
-
         buf_LRU_remove_block(bpage);
         buf_LRU_add_block_low(bpage, FALSE);
 }
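Note: "making a block young" in the hunk above is just unlink-and-reinsert at the head of the LRU list (the version being removed also counted the event in n_pages_made_young). A standalone doubly-linked-list sketch of the move (not InnoDB code):

#include <stdio.h>

struct node { int id; struct node *prev, *next; };

static void move_to_front(struct node **head, struct node *n)
{
        if (*head == n)
                return;
        if (n->prev) n->prev->next = n->next;  /* unlink */
        if (n->next) n->next->prev = n->prev;
        n->prev = NULL;                        /* re-insert at the head */
        n->next = *head;
        (*head)->prev = n;
        *head = n;
}

int main(void)
{
        struct node a = {1, NULL, NULL}, b = {2, NULL, NULL};
        struct node *head = &a;

        a.next = &b;
        b.prev = &a;
        move_to_front(&head, &b);
        printf("head is now block %d\n", head->id); /* 2 */
        return 0;
}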
@@ -1443,11 +1315,11 @@
 Try to free a block.  If bpage is a descriptor of a compressed-only
 page, the descriptor object will be freed as well.
 
-NOTE: If this function returns BUF_LRU_FREED, it will temporarily
-release buf_pool->mutex.  Furthermore, the page frame will no longer be
+NOTE: If this function returns BUF_LRU_FREED, it will not temporarily
+release buf_pool_mutex.  Furthermore, the page frame will no longer be
 accessible via bpage.
 
-The caller must hold buf_pool->mutex and buf_page_get_mutex(bpage) and
+The caller must hold buf_pool_mutex and buf_page_get_mutex(bpage) and
 release these two mutexes after the call.  No other
 buf_page_get_mutex() may be held when calling this function.
 @return BUF_LRU_FREED if freed, BUF_LRU_CANNOT_RELOCATE or
@@ -1465,20 +1337,14 @@
                                 was temporarily released, or NULL */
 {
         buf_page_t*     b = NULL;
-        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
         mutex_t*        block_mutex = buf_page_get_mutex(bpage);
 
-        ut_ad(buf_pool_mutex_own(buf_pool));
+        ut_ad(buf_pool_mutex_own());
         ut_ad(mutex_own(block_mutex));
         ut_ad(buf_page_in_file(bpage));
         ut_ad(bpage->in_LRU_list);
         ut_ad(!bpage->in_flush_list == !bpage->oldest_modification);
-#if UNIV_WORD_SIZE == 4
-        /* On 32-bit systems, there is no padding in buf_page_t.  On
-        other systems, Valgrind could complain about uninitialized pad
-        bytes. */
         UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage);
-#endif
 
         if (!buf_page_can_relocate(bpage)) {
 
@@ -1512,9 +1378,9 @@
                 If it cannot be allocated (without freeing a block
                 from the LRU list), refuse to free bpage. */
alloc:
-                buf_pool_mutex_exit_forbid(buf_pool);
-                b = static_cast<buf_page_t *>(buf_buddy_alloc(buf_pool, sizeof *b, NULL));
-                buf_pool_mutex_exit_allow(buf_pool);
+                buf_pool_mutex_exit_forbid();
+                b = buf_buddy_alloc(sizeof *b, NULL);
+                buf_pool_mutex_exit_allow();
 
                 if (UNIV_UNLIKELY(!b)) {
                         return(BUF_LRU_CANNOT_RELOCATE);
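Note: the exit_forbid/exit_allow bracket around buf_buddy_alloc() in this hunk is a debug guard: while the bracket is open, releasing the buffer-pool mutex is an assertion failure. A standalone model of the mechanism, assuming the upstream counter-based implementation (not InnoDB code):

#include <assert.h>
#include <stdio.h>

static int exit_forbidden; /* models buf_pool_mutex_exit_forbidden */

static void pool_mutex_exit_forbid(void) { exit_forbidden++; }
static void pool_mutex_exit_allow(void)  { exit_forbidden--; }
static void pool_mutex_exit(void)        { assert(exit_forbidden == 0); }

int main(void)
{
        pool_mutex_exit_forbid();
        /* ... the buddy allocation runs here and must not release
           the pool mutex ... */
        pool_mutex_exit_allow();

        pool_mutex_exit(); /* fine: the bracket is closed */
        puts("ok");
        return 0;
}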
1536
1402
                ut_a(bpage->buf_fix_count == 0);
1537
1403
 
1538
1404
                if (b) {
1539
 
                        buf_page_t*     hash_b;
1540
1405
                        buf_page_t*     prev_b  = UT_LIST_GET_PREV(LRU, b);
1541
 
 
1542
 
                        const ulint     fold = buf_page_address_fold(
 
1406
                        const ulint     fold    = buf_page_address_fold(
1543
1407
                                bpage->space, bpage->offset);
1544
1408
 
1545
 
                        hash_b  = buf_page_hash_get_low(
1546
 
                                buf_pool, bpage->space, bpage->offset, fold);
1547
 
 
1548
 
                        ut_a(!hash_b);
 
1409
                        ut_a(!buf_page_hash_get(bpage->space, bpage->offset));
1549
1410
 
1550
1411
                        b->state = b->oldest_modification
1551
1412
                                ? BUF_BLOCK_ZIP_DIRTY
1579
1440
 
1580
1441
                                ut_ad(prev_b->in_LRU_list);
1581
1442
                                ut_ad(buf_page_in_file(prev_b));
1582
 
#if UNIV_WORD_SIZE == 4
1583
 
                                /* On 32-bit systems, there is no
1584
 
                                padding in buf_page_t.  On other
1585
 
                                systems, Valgrind could complain about
1586
 
                                uninitialized pad bytes. */
1587
1443
                                UNIV_MEM_ASSERT_RW(prev_b, sizeof *prev_b);
1588
 
#endif
 
1444
 
1589
1445
                                UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU,
1590
1446
                                                     prev_b, b);
1591
1447
 
1597
1453
 
1598
1454
                                                buf_pool->LRU_old = b;
1599
1455
                                        }
 
1456
#ifdef UNIV_LRU_DEBUG
 
1457
                                        ut_a(prev_b->old
 
1458
                                             || !UT_LIST_GET_NEXT(LRU, b)
 
1459
                                             || UT_LIST_GET_NEXT(LRU, b)->old);
 
1460
                                } else {
 
1461
                                        ut_a(!prev_b->old
 
1462
                                             || !UT_LIST_GET_NEXT(LRU, b)
 
1463
                                             || !UT_LIST_GET_NEXT(LRU, b)->old);
 
1464
#endif /* UNIV_LRU_DEBUG */
1600
1465
                                }
1601
1466
 
1602
1467
                                lru_len = UT_LIST_GET_LEN(buf_pool->LRU);
1605
1470
                                        ut_ad(buf_pool->LRU_old);
1606
1471
                                        /* Adjust the length of the
1607
1472
                                        old block list if necessary */
1608
 
                                        buf_LRU_old_adjust_len(buf_pool);
 
1473
                                        buf_LRU_old_adjust_len();
1609
1474
                                } else if (lru_len == BUF_LRU_OLD_MIN_LEN) {
1610
1475
                                        /* The LRU list is now long
1611
1476
                                        enough for LRU_old to become
1612
1477
                                        defined: init it */
1613
 
                                        buf_LRU_old_init(buf_pool);
 
1478
                                        buf_LRU_old_init();
1614
1479
                                }
1615
 
#ifdef UNIV_LRU_DEBUG
1616
 
                                /* Check that the "old" flag is consistent
1617
 
                                in the block and its neighbours. */
1618
 
                                buf_page_set_old(b, buf_page_is_old(b));
1619
 
#endif /* UNIV_LRU_DEBUG */
1620
1480
                        } else {
1621
1481
                                ut_d(b->in_LRU_list = FALSE);
1622
1482
                                buf_LRU_add_block_low(b, buf_page_is_old(b));
1625
1485
                        if (b->state == BUF_BLOCK_ZIP_PAGE) {
1626
1486
                                buf_LRU_insert_zip_clean(b);
1627
1487
                        } else {
1628
 
                                /* Relocate on buf_pool->flush_list. */
1629
 
                                buf_flush_relocate_on_flush_list(bpage, b);
 
1488
                                buf_page_t* prev;
 
1489
 
 
1490
                                ut_ad(b->in_flush_list);
 
1491
                                ut_d(bpage->in_flush_list = FALSE);
 
1492
 
 
1493
                                prev = UT_LIST_GET_PREV(list, b);
 
1494
                                UT_LIST_REMOVE(list, buf_pool->flush_list, b);
 
1495
 
 
1496
                                if (prev) {
 
1497
                                        ut_ad(prev->in_flush_list);
 
1498
                                        UT_LIST_INSERT_AFTER(
 
1499
                                                list,
 
1500
                                                buf_pool->flush_list,
 
1501
                                                prev, b);
 
1502
                                } else {
 
1503
                                        UT_LIST_ADD_FIRST(
 
1504
                                                list,
 
1505
                                                buf_pool->flush_list,
 
1506
                                                b);
 
1507
                                }
1630
1508
                        }
1631
1509
 
1632
1510
                        bpage->zip.data = NULL;
1634
1512
 
1635
1513
                        /* Prevent buf_page_get_gen() from
1636
1514
                        decompressing the block while we release
1637
 
                        buf_pool->mutex and block_mutex. */
 
1515
                        buf_pool_mutex and block_mutex. */
1638
1516
                        b->buf_fix_count++;
1639
1517
                        b->io_fix = BUF_IO_READ;
1640
1518
                }
...
 			*buf_pool_mutex_released = TRUE;
 		}
 
-		buf_pool_mutex_exit(buf_pool);
+		buf_pool_mutex_exit();
 		mutex_exit(block_mutex);
 
 		/* Remove possible adaptive hash index on the page.
...
 				: BUF_NO_CHECKSUM_MAGIC);
 		}
 
-		buf_pool_mutex_enter(buf_pool);
+		buf_pool_mutex_enter();
 		mutex_enter(block_mutex);
 
 		if (b) {
-			mutex_enter(&buf_pool->zip_mutex);
+			mutex_enter(&buf_pool_zip_mutex);
 			b->buf_fix_count--;
 			buf_page_set_io_fix(b, BUF_IO_NONE);
-			mutex_exit(&buf_pool->zip_mutex);
+			mutex_exit(&buf_pool_zip_mutex);
 		}
 
 		buf_LRU_block_free_hashed_page((buf_block_t*) bpage);
...
 		/* The block_mutex should have been released by
 		buf_LRU_block_remove_hashed_page() when it returns
 		BUF_BLOCK_ZIP_FREE. */
-		ut_ad(block_mutex == &buf_pool->zip_mutex);
+		ut_ad(block_mutex == &buf_pool_zip_mutex);
 		mutex_enter(block_mutex);
 	}
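
Both versions above pin the compressed-only descriptor (b->buf_fix_count++ plus io_fix = BUF_IO_READ) before dropping the pool and block mutexes, then re-take the mutexes and undo the pin once the mutex-free window is over. A minimal sketch of that pin-across-an-unlocked-window pattern, using plain pthreads and invented names (not InnoDB's mutex layer):

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    /* Toy descriptor; the field name mimics the diff, everything else
       is invented for illustration. */
    typedef struct {
        pthread_mutex_t mutex;
        unsigned        buf_fix_count;  /* > 0 means "pinned, do not evict" */
    } toy_page_t;

    /* Pin while holding the mutex, so the page survives the window
       where no lock is held. */
    static void toy_pin(toy_page_t* p)
    {
        pthread_mutex_lock(&p->mutex);
        p->buf_fix_count++;
        pthread_mutex_unlock(&p->mutex);
    }

    static void toy_unpin(toy_page_t* p)
    {
        pthread_mutex_lock(&p->mutex);
        assert(p->buf_fix_count > 0);
        p->buf_fix_count--;
        pthread_mutex_unlock(&p->mutex);
    }

    int main(void)
    {
        toy_page_t p = { PTHREAD_MUTEX_INITIALIZER, 0 };
        toy_pin(&p);
        /* ... lock-free window: an evictor that sees buf_fix_count > 0
           must leave the page alone ... */
        toy_unpin(&p);
        assert(p.buf_fix_count == 0);
        printf("pin/unpin ok\n");
        return 0;
    }
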
 
...
 /*=============================*/
 	buf_block_t*	block)	/*!< in: block, must not contain a file page */
 {
-	void*		data;
-	buf_pool_t*	buf_pool = buf_pool_from_block(block);
+	void*	data;
 
 	ut_ad(block);
-	ut_ad(buf_pool_mutex_own(buf_pool));
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(mutex_own(&block->mutex));
 
 	switch (buf_block_get_state(block)) {
...
 	if (data) {
 		block->page.zip.data = NULL;
 		mutex_exit(&block->mutex);
-		buf_pool_mutex_exit_forbid(buf_pool);
-
-		buf_buddy_free(
-			buf_pool, data, page_zip_get_size(&block->page.zip));
-
-		buf_pool_mutex_exit_allow(buf_pool);
+		buf_pool_mutex_exit_forbid();
+		buf_buddy_free(data, page_zip_get_size(&block->page.zip));
+		buf_pool_mutex_exit_allow();
 		mutex_enter(&block->mutex);
 		page_zip_set_size(&block->page.zip, 0);
 	}
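
In both versions, buf_pool_mutex_exit_forbid()/..._allow() bracket the buf_buddy_free() call: the guard makes it an error to release the pool mutex while the buddy allocator may be relocating blocks. A toy model of that discipline (single pool, invented names, a sketch rather than the real macros):

    #include <assert.h>
    #include <stdio.h>

    /* Invented single-pool stand-ins: a nesting counter that makes it
       a hard error to release the pool mutex while a buddy-allocator
       call is in progress. */
    static int toy_mutex_held = 1;  /* pretend the pool mutex is held */
    static int toy_exit_forbidden = 0;

    static void toy_exit_forbid(void) { toy_exit_forbidden++; }

    static void toy_exit_allow(void)
    {
        assert(toy_exit_forbidden > 0);
        toy_exit_forbidden--;
    }

    static void toy_pool_mutex_exit(void)
    {
        assert(toy_exit_forbidden == 0);  /* the whole point of the guard */
        toy_mutex_held = 0;
    }

    static void toy_buddy_free(void* ptr, unsigned size)
    {
        (void) ptr; (void) size;
        /* may merge buddies; must not see the pool mutex dropped */
        assert(toy_exit_forbidden > 0);
    }

    int main(void)
    {
        char blob[64];
        toy_exit_forbid();
        toy_buddy_free(blob, sizeof blob);
        toy_exit_allow();
        toy_pool_mutex_exit();
        printf("forbid depth %d, mutex held=%d\n",
               toy_exit_forbidden, toy_mutex_held);
        return 0;
    }
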
...
 /******************************************************************//**
 Takes a block out of the LRU list and page hash table.
 If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
-the object will be freed and buf_pool->zip_mutex will be released.
+the object will be freed and buf_pool_zip_mutex will be released.
 
 If a compressed page or a compressed-only block descriptor is freed,
 other compressed pages or compressed-only block descriptors may be
...
 	ibool		zip)	/*!< in: TRUE if should remove also the
 				compressed page of an uncompressed page */
 {
-	ulint			fold;
 	const buf_page_t*	hashed_bpage;
-	buf_pool_t*		buf_pool = buf_pool_from_bpage(bpage);
-
 	ut_ad(bpage);
-	ut_ad(buf_pool_mutex_own(buf_pool));
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
 
 	ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
 	ut_a(bpage->buf_fix_count == 0);
 
-#if UNIV_WORD_SIZE == 4
-	/* On 32-bit systems, there is no padding in
-	buf_page_t.  On other systems, Valgrind could complain
-	about uninitialized pad bytes. */
 	UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage);
-#endif
 
 	buf_LRU_remove_block(bpage);
 
...
 		break;
 	}
 
-	fold = buf_page_address_fold(bpage->space, bpage->offset);
-	hashed_bpage = buf_page_hash_get_low(
-		buf_pool, bpage->space, bpage->offset, fold);
+	hashed_bpage = buf_page_hash_get(bpage->space, bpage->offset);
 
 	if (UNIV_UNLIKELY(bpage != hashed_bpage)) {
 		fprintf(stderr,
...
 
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 		mutex_exit(buf_page_get_mutex(bpage));
-		buf_pool_mutex_exit(buf_pool);
+		buf_pool_mutex_exit();
 		buf_print();
 		buf_LRU_print();
 		buf_validate();
...
 	ut_ad(!bpage->in_zip_hash);
 	ut_ad(bpage->in_page_hash);
 	ut_d(bpage->in_page_hash = FALSE);
-	HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, bpage);
+	HASH_DELETE(buf_page_t, hash, buf_pool->page_hash,
+		    buf_page_address_fold(bpage->space, bpage->offset),
+		    bpage);
 	switch (buf_page_get_state(bpage)) {
 	case BUF_BLOCK_ZIP_PAGE:
 		ut_ad(!bpage->in_free_list);
...
 
 		UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);
 
-		mutex_exit(&buf_pool->zip_mutex);
-		buf_pool_mutex_exit_forbid(buf_pool);
-
-		buf_buddy_free(
-			buf_pool, bpage->zip.data,
-			page_zip_get_size(&bpage->zip));
-
-		buf_buddy_free(buf_pool, bpage, sizeof(*bpage));
-		buf_pool_mutex_exit_allow(buf_pool);
+		mutex_exit(&buf_pool_zip_mutex);
+		buf_pool_mutex_exit_forbid();
+		buf_buddy_free(bpage->zip.data,
+			       page_zip_get_size(&bpage->zip));
+		buf_buddy_free(bpage, sizeof(*bpage));
+		buf_pool_mutex_exit_allow();
 		UNIV_MEM_UNDESC(bpage);
 		return(BUF_BLOCK_ZIP_FREE);
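
The removed lines compute the page-hash fold once and reuse it for both the lookup (buf_page_hash_get_low) and the HASH_DELETE, where the restored code recomputes buf_page_address_fold() at each site. A standalone sketch of the compute-once, reuse-everywhere pattern on a toy chaining hash table (the fold formula and all names below are invented stand-ins, not InnoDB's):

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    #define TOY_CELLS 16

    typedef struct toy_page {
        unsigned         space;
        unsigned         offset;
        struct toy_page* hash_next;
    } toy_page_t;

    static toy_page_t* toy_cells[TOY_CELLS];

    static unsigned toy_fold(unsigned space, unsigned offset)
    {
        return (space << 20) + space + offset;  /* illustrative formula only */
    }

    static void toy_insert(toy_page_t* p, unsigned fold)
    {
        toy_page_t** cell = &toy_cells[fold % TOY_CELLS];
        p->hash_next = *cell;
        *cell = p;
    }

    static toy_page_t* toy_lookup(unsigned space, unsigned offset, unsigned fold)
    {
        toy_page_t* p = toy_cells[fold % TOY_CELLS];
        while (p && (p->space != space || p->offset != offset)) {
            p = p->hash_next;
        }
        return p;
    }

    static void toy_delete(toy_page_t* p, unsigned fold)
    {
        toy_page_t** pp = &toy_cells[fold % TOY_CELLS];
        while (*pp != p) pp = &(*pp)->hash_next;
        *pp = p->hash_next;
    }

    int main(void)
    {
        toy_page_t pg = {3, 42, NULL};
        unsigned fold = toy_fold(pg.space, pg.offset);  /* computed once */

        toy_insert(&pg, fold);
        assert(toy_lookup(3, 42, fold) == &pg);  /* lookup reuses fold */
        toy_delete(&pg, fold);                   /* delete reuses fold */
        assert(toy_lookup(3, 42, fold) == NULL);
        printf("fold %u reused for insert, lookup and delete\n", fold);
        return 0;
    }
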
 
...
 			ut_ad(!bpage->in_flush_list);
 			ut_ad(!bpage->in_LRU_list);
 			mutex_exit(&((buf_block_t*) bpage)->mutex);
-			buf_pool_mutex_exit_forbid(buf_pool);
-
-			buf_buddy_free(
-				buf_pool, data,
-				page_zip_get_size(&bpage->zip));
-
-			buf_pool_mutex_exit_allow(buf_pool);
+			buf_pool_mutex_exit_forbid();
+			buf_buddy_free(data, page_zip_get_size(&bpage->zip));
+			buf_pool_mutex_exit_allow();
 			mutex_enter(&((buf_block_t*) bpage)->mutex);
 			page_zip_set_size(&bpage->zip, 0);
 		}
...
 	buf_block_t*	block)	/*!< in: block, must contain a file page and
 				be in a state where it can be freed */
 {
-#ifdef UNIV_DEBUG
-	buf_pool_t*	buf_pool = buf_pool_from_block(block);
-	ut_ad(buf_pool_mutex_own(buf_pool));
-#endif
+	ut_ad(buf_pool_mutex_own());
 	ut_ad(mutex_own(&block->mutex));
 
 	buf_block_set_state(block, BUF_BLOCK_MEMORY);
...
 	buf_LRU_block_free_non_file_page(block);
 }
 
-/**********************************************************************//**
-Updates buf_pool->LRU_old_ratio for one buffer pool instance.
-@return updated old_pct */
-static
-uint
-buf_LRU_old_ratio_update_instance(
-/*==============================*/
-	buf_pool_t*	buf_pool,/*!< in: buffer pool instance */
-	uint		old_pct,/*!< in: Reserve this percentage of
-				the buffer pool for "old" blocks. */
-	ibool		adjust)	/*!< in: TRUE=adjust the LRU list;
-				FALSE=just assign buf_pool->LRU_old_ratio
-				during the initialization of InnoDB */
-{
-	uint	ratio;
-
-	ratio = old_pct * BUF_LRU_OLD_RATIO_DIV / 100;
-	if (ratio < BUF_LRU_OLD_RATIO_MIN) {
-		ratio = BUF_LRU_OLD_RATIO_MIN;
-	} else if (ratio > BUF_LRU_OLD_RATIO_MAX) {
-		ratio = BUF_LRU_OLD_RATIO_MAX;
-	}
-
-	if (adjust) {
-		buf_pool_mutex_enter(buf_pool);
-
-		if (ratio != buf_pool->LRU_old_ratio) {
-			buf_pool->LRU_old_ratio = ratio;
-
-			if (UT_LIST_GET_LEN(buf_pool->LRU)
-			    >= BUF_LRU_OLD_MIN_LEN) {
-
-				buf_LRU_old_adjust_len(buf_pool);
-			}
-		}
-
-		buf_pool_mutex_exit(buf_pool);
-	} else {
-		buf_pool->LRU_old_ratio = ratio;
-	}
-
-	/* the reverse of
-	ratio = old_pct * BUF_LRU_OLD_RATIO_DIV / 100 */
-	return((uint) (ratio * 100 / (double) BUF_LRU_OLD_RATIO_DIV + 0.5));
-}
-
-/**********************************************************************//**
-Updates buf_pool->LRU_old_ratio.
-@return updated old_pct */
-UNIV_INTERN
-ulint
-buf_LRU_old_ratio_update(
-/*=====================*/
-	uint	old_pct,/*!< in: Reserve this percentage of
-			the buffer pool for "old" blocks. */
-	ibool	adjust)	/*!< in: TRUE=adjust the LRU list;
-			FALSE=just assign buf_pool->LRU_old_ratio
-			during the initialization of InnoDB */
-{
-	ulint	i;
-	ulint	new_ratio = 0;
-
-	for (i = 0; i < srv_buf_pool_instances; i++) {
-		buf_pool_t*	buf_pool;
-
-		buf_pool = buf_pool_from_array(i);
-
-		new_ratio = buf_LRU_old_ratio_update_instance(
-			buf_pool, old_pct, adjust);
-	}
-
-	return(new_ratio);
-}
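
The two functions removed above convert a user-facing percentage into the fixed-point LRU_old_ratio, clamp it, and report back the effective percentage via the inverse computation. A compilable sketch of that round trip; the DIV/MIN/MAX constants are assumptions standing in for BUF_LRU_OLD_RATIO_DIV/_MIN/_MAX:

    #include <stdio.h>

    #define TOY_RATIO_DIV  1024            /* assumed fixed-point denominator */
    #define TOY_RATIO_MIN  51              /* assumed lower clamp, ~5% */
    #define TOY_RATIO_MAX  TOY_RATIO_DIV   /* assumed upper clamp, 100% */

    static unsigned toy_pct_to_ratio(unsigned old_pct)
    {
        unsigned ratio = old_pct * TOY_RATIO_DIV / 100;
        if (ratio < TOY_RATIO_MIN) ratio = TOY_RATIO_MIN;
        else if (ratio > TOY_RATIO_MAX) ratio = TOY_RATIO_MAX;
        return ratio;
    }

    /* the reverse of ratio = old_pct * DIV / 100, rounded to a percent */
    static unsigned toy_ratio_to_pct(unsigned ratio)
    {
        return (unsigned) (ratio * 100 / (double) TOY_RATIO_DIV + 0.5);
    }

    int main(void)
    {
        unsigned pct;
        for (pct = 0; pct <= 100; pct += 25) {
            unsigned ratio = toy_pct_to_ratio(pct);
            printf("old_pct=%3u -> ratio=%4u -> back to %u%%\n",
                   pct, ratio, toy_ratio_to_pct(ratio));
        }
        return 0;
    }

Note the asymmetry the return value exists for: an out-of-range request (say 0%) is silently clamped, so the caller learns the percentage actually in effect.
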
-
 /********************************************************************//**
 Update the historical stats that we are collecting for LRU eviction
 policy at the end of each interval. */
...
 buf_LRU_stat_update(void)
 /*=====================*/
 {
-	ulint		i;
 	buf_LRU_stat_t*	item;
-	buf_pool_t*	buf_pool;
-	ibool		evict_started = FALSE;
 
 	/* If we haven't started eviction yet then don't update stats. */
-	for (i = 0; i < srv_buf_pool_instances; i++) {
-
-		buf_pool = buf_pool_from_array(i);
-
-		if (buf_pool->freed_page_clock != 0) {
-			evict_started = TRUE;
-			break;
-		}
-	}
-
-	if (!evict_started) {
+	if (buf_pool->freed_page_clock == 0) {
 		goto func_exit;
 	}
 
+	buf_pool_mutex_enter();
+
 	/* Update the index. */
 	item = &buf_LRU_stat_arr[buf_LRU_stat_arr_ind];
 	buf_LRU_stat_arr_ind++;
...
 	/* Put current entry in the array. */
 	memcpy(item, &buf_LRU_stat_cur, sizeof *item);
 
+	buf_pool_mutex_exit();
+
 func_exit:
 	/* Clear the current entry. */
 	memset(&buf_LRU_stat_cur, 0, sizeof buf_LRU_stat_cur);
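
buf_LRU_stat_update() keeps the last N interval samples in a circular array alongside a running sum, so the windowed average is O(1) to maintain; the elided lines between the index update and the memcpy presumably wrap the index and adjust that sum. A standalone sketch of the sliding-window bookkeeping such a scheme needs (types, names, and the sum-maintenance detail are assumptions, not the file's exact code):

    #include <stdio.h>
    #include <string.h>

    #define TOY_N_INTERVAL 4

    typedef struct { unsigned io; unsigned unzip; } toy_stat_t;

    static toy_stat_t toy_arr[TOY_N_INTERVAL];
    static unsigned   toy_ind;
    static toy_stat_t toy_sum;
    static toy_stat_t toy_cur;  /* accumulated during the current interval */

    static void toy_stat_update(void)
    {
        toy_stat_t* item = &toy_arr[toy_ind];
        toy_ind = (toy_ind + 1) % TOY_N_INTERVAL;

        /* evict the oldest sample from the running sum, add the new one */
        toy_sum.io    += toy_cur.io    - item->io;
        toy_sum.unzip += toy_cur.unzip - item->unzip;

        *item = toy_cur;                        /* put current entry in array */
        memset(&toy_cur, 0, sizeof toy_cur);    /* clear for the next interval */
    }

    int main(void)
    {
        unsigned i;
        for (i = 1; i <= 6; i++) {
            toy_cur.io = i;  /* pretend i I/Os happened this interval */
            toy_stat_update();
            printf("after interval %u: window io sum = %u\n", i, toy_sum.io);
        }
        return 0;
    }
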
...
 
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 /**********************************************************************//**
-Validates the LRU list for one buffer pool instance. */
-static
-void
-buf_LRU_validate_instance(
-/*======================*/
-	buf_pool_t*	buf_pool)
+Validates the LRU list.
+@return TRUE */
+UNIV_INTERN
+ibool
+buf_LRU_validate(void)
+/*==================*/
 {
 	buf_page_t*	bpage;
 	buf_block_t*	block;
 	ulint		old_len;
 	ulint		new_len;
+	ulint		LRU_pos;
 
 	ut_ad(buf_pool);
-	buf_pool_mutex_enter(buf_pool);
+	buf_pool_mutex_enter();
 
 	if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) {
 
 		ut_a(buf_pool->LRU_old);
 		old_len = buf_pool->LRU_old_len;
-		new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU)
-				 * buf_pool->LRU_old_ratio
-				 / BUF_LRU_OLD_RATIO_DIV,
-				 UT_LIST_GET_LEN(buf_pool->LRU)
-				 - (BUF_LRU_OLD_TOLERANCE
-				    + BUF_LRU_NON_OLD_MIN_LEN));
+		new_len = 3 * (UT_LIST_GET_LEN(buf_pool->LRU) / 8);
 		ut_a(old_len >= new_len - BUF_LRU_OLD_TOLERANCE);
 		ut_a(old_len <= new_len + BUF_LRU_OLD_TOLERANCE);
 	}
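
The assertion pair above checks that the tracked old-sublist length stays within BUF_LRU_OLD_TOLERANCE of the target, with the restored code hard-wiring the 3/8 target in place of the tunable ratio. The same invariant as a compilable sketch (constants are invented stand-ins):

    #include <assert.h>
    #include <stdio.h>

    #define TOY_OLD_MIN_LEN 8  /* stand-in for BUF_LRU_OLD_MIN_LEN */
    #define TOY_TOLERANCE   2  /* stand-in for BUF_LRU_OLD_TOLERANCE */

    /* Does the tracked old_len sit within tolerance of the 3/8 target? */
    static int toy_old_len_ok(unsigned lru_len, unsigned old_len)
    {
        unsigned new_len;

        if (lru_len < TOY_OLD_MIN_LEN) {
            return 1;  /* no old sublist yet: nothing to check */
        }
        new_len = 3 * (lru_len / 8);
        /* written to avoid unsigned underflow when new_len < tolerance */
        return old_len + TOY_TOLERANCE >= new_len
            && old_len <= new_len + TOY_TOLERANCE;
    }

    int main(void)
    {
        assert(toy_old_len_ok(64, 24));   /* exactly 3/8 of 64 */
        assert(toy_old_len_ok(64, 26));   /* within tolerance */
        assert(!toy_old_len_ok(64, 30));  /* drifted too far */
        printf("old-length invariant holds\n");
        return 0;
    }
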
...
 		}
 
 		if (buf_page_is_old(bpage)) {
-			const buf_page_t*	prev
-				= UT_LIST_GET_PREV(LRU, bpage);
-			const buf_page_t*	next
-				= UT_LIST_GET_NEXT(LRU, bpage);
-
-			if (!old_len++) {
-				ut_a(buf_pool->LRU_old == bpage);
-			} else {
-				ut_a(!prev || buf_page_is_old(prev));
-			}
-
-			ut_a(!next || buf_page_is_old(next));
-		}
+			old_len++;
+		}
+
+		if (buf_pool->LRU_old && (old_len == 1)) {
+			ut_a(buf_pool->LRU_old == bpage);
+		}
+
+		LRU_pos = buf_page_get_LRU_position(bpage);
 
 		bpage = UT_LIST_GET_NEXT(LRU, bpage);
-	}
-
-	ut_a(buf_pool->LRU_old_len == old_len);
+
+		if (bpage) {
+			/* If the following assert fails, it may
+			not be an error: just the buf_pool clock
+			has wrapped around */
+			ut_a(LRU_pos >= buf_page_get_LRU_position(bpage));
+		}
+	}
+
+	if (buf_pool->LRU_old) {
+		ut_a(buf_pool->LRU_old_len == old_len);
+	}
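
The removed walk asserts a stronger structural property: the "old" pages must form one contiguous stretch at the tail of the LRU list, with buf_pool->LRU_old pointing at its first block, while the restored walk only counts old pages and checks that LRU positions decrease monotonically. A sketch of the contiguity check, with a plain array standing in for the linked list (all names invented):

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Scanning from the most-recent end: once an old page is seen, every
       later page must also be old, and the first old page seen must be
       the one the LRU_old pointer designates. */
    static int toy_validate(const int* is_old, size_t len, size_t lru_old_pos)
    {
        size_t i;
        size_t old_len = 0;

        for (i = 0; i < len; i++) {
            if (is_old[i]) {
                if (old_len == 0 && i != lru_old_pos) {
                    return 0;  /* LRU_old points elsewhere */
                }
                old_len++;
            } else if (old_len > 0) {
                return 0;  /* a young page after an old one */
            }
        }
        return 1;
    }

    int main(void)
    {
        const int good[] = {0, 0, 0, 1, 1, 1, 1, 1};
        const int bad[]  = {0, 0, 1, 0, 1, 1, 1, 1};

        assert(toy_validate(good, 8, 3));
        assert(!toy_validate(bad, 8, 2));
        printf("old flags form a contiguous tail\n");
        return 0;
    }
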
 
 	UT_LIST_VALIDATE(list, buf_page_t, buf_pool->free,
 			 ut_ad(ut_list_node_313->in_free_list));
...
 		ut_a(buf_page_belongs_to_unzip_LRU(&block->page));
 	}
 
-	buf_pool_mutex_exit(buf_pool);
-}
-
-/**********************************************************************//**
-Validates the LRU list.
-@return TRUE */
-UNIV_INTERN
-ibool
-buf_LRU_validate(void)
-/*==================*/
-{
-	ulint	i;
-
-	for (i = 0; i < srv_buf_pool_instances; i++) {
-		buf_pool_t*	buf_pool;
-
-		buf_pool = buf_pool_from_array(i);
-		buf_LRU_validate_instance(buf_pool);
-	}
-
+	buf_pool_mutex_exit();
 	return(TRUE);
 }
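
The removed wrapper shows the multi-instance layering this revision undoes: the public buf_LRU_validate() only iterates srv_buf_pool_instances and validates each pool, and the reverted single-pool code folds the two layers back together. A toy version of that per-instance dispatch pattern (everything below is invented for illustration):

    #include <stdio.h>

    #define TOY_N_INSTANCES 4

    typedef struct { int id; } toy_pool_t;

    static toy_pool_t toy_pools[TOY_N_INSTANCES];

    static void toy_validate_instance(toy_pool_t* pool)
    {
        printf("validating instance %d\n", pool->id);
    }

    /* The public entry point does nothing but fan out per instance. */
    static int toy_validate(void)
    {
        int i;
        for (i = 0; i < TOY_N_INSTANCES; i++) {
            toy_pools[i].id = i;
            toy_validate_instance(&toy_pools[i]);
        }
        return 1;  /* mirrors the ibool TRUE return */
    }

    int main(void)
    {
        return toy_validate() ? 0 : 1;
    }
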
 #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
 
 #if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 /**********************************************************************//**
-Prints the LRU list for one buffer pool instance. */
+Prints the LRU list. */
 UNIV_INTERN
 void
-buf_LRU_print_instance(
-/*===================*/
-	buf_pool_t*	buf_pool)
+buf_LRU_print(void)
+/*===============*/
 {
 	const buf_page_t*	bpage;
 
 	ut_ad(buf_pool);
-	buf_pool_mutex_enter(buf_pool);
+	buf_pool_mutex_enter();
+
+	fprintf(stderr, "Pool ulint clock %lu\n",
+		(ulong) buf_pool->ulint_clock);
 
 	bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
 
 	while (bpage != NULL) {
 
-		mutex_enter(buf_page_get_mutex(bpage));
 		fprintf(stderr, "BLOCK space %lu page %lu ",
 			(ulong) buf_page_get_space(bpage),
 			(ulong) buf_page_get_page_no(bpage));
...
 			const byte*	frame;
 		case BUF_BLOCK_FILE_PAGE:
 			frame = buf_block_get_frame((buf_block_t*) bpage);
-			fprintf(stderr, "\ntype %lu"
-				" index id %llu\n",
+			fprintf(stderr, "\nLRU pos %lu type %lu"
+				" index id %lu\n",
+				(ulong) buf_page_get_LRU_position(bpage),
 				(ulong) fil_page_get_type(frame),
-				(ullint) btr_page_get_index_id(frame));
+				(ulong) ut_dulint_get_low(
+					btr_page_get_index_id(frame)));
 			break;
 		case BUF_BLOCK_ZIP_PAGE:
 			frame = bpage->zip.data;
-			fprintf(stderr, "\ntype %lu size %lu"
-				" index id %llu\n",
+			fprintf(stderr, "\nLRU pos %lu type %lu size %lu"
+				" index id %lu\n",
+				(ulong) buf_page_get_LRU_position(bpage),
 				(ulong) fil_page_get_type(frame),
 				(ulong) buf_page_get_zip_size(bpage),
-				(ullint) btr_page_get_index_id(frame));
+				(ulong) ut_dulint_get_low(
+					btr_page_get_index_id(frame)));
 			break;
 
 		default:
-			fprintf(stderr, "\n!state %lu!\n",
+			fprintf(stderr, "\nLRU pos %lu !state %lu!\n",
+				(ulong) buf_page_get_LRU_position(bpage),
 				(ulong) buf_page_get_state(bpage));
 			break;
 		}
 
-		mutex_exit(buf_page_get_mutex(bpage));
 		bpage = UT_LIST_GET_NEXT(LRU, bpage);
 	}
 
-	buf_pool_mutex_exit(buf_pool);
-}
-
-/**********************************************************************//**
-Prints the LRU list. */
-UNIV_INTERN
-void
-buf_LRU_print(void)
-/*===============*/
-{
-	ulint		i;
-	buf_pool_t*	buf_pool;
-
-	for (i = 0; i < srv_buf_pool_instances; i++) {
-		buf_pool = buf_pool_from_array(i);
-		buf_LRU_print_instance(buf_pool);
-	}
+	buf_pool_mutex_exit();
 }
 #endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */