/*****************************************************************************

Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
#include "log0recv.h"
#include "srv0srv.h"
/** The number of blocks from the LRU_old pointer onward, including
the block pointed to, must be buf_pool->LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV
of the whole LRU list length, except that the tolerance defined below
is allowed. Note that the tolerance must be small enough such that for
even the BUF_LRU_OLD_MIN_LEN long LRU list, the LRU_old pointer is not
allowed to point to either end of the LRU list. */

#define BUF_LRU_OLD_TOLERANCE	20
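
/* Illustration (an added note, not from the original source, assuming the
default innodb_old_blocks_pct of 37): buf_pool->LRU_old_ratio is then about
37 * BUF_LRU_OLD_RATIO_DIV / 100, so for an LRU list of 1000 blocks the old
sublist is kept at roughly 370 blocks, and buf_LRU_old_adjust_len() only
moves the LRU_old pointer once the actual length drifts more than
BUF_LRU_OLD_TOLERANCE (20) blocks away from that target. */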
/** The minimum amount of non-old blocks when the LRU_old list exists
(that is, when there are more than BUF_LRU_OLD_MIN_LEN blocks).
@see buf_LRU_old_adjust_len */
#define BUF_LRU_NON_OLD_MIN_LEN	5
#if BUF_LRU_NON_OLD_MIN_LEN >= BUF_LRU_OLD_MIN_LEN
# error "BUF_LRU_NON_OLD_MIN_LEN >= BUF_LRU_OLD_MIN_LEN"
#endif
/** When dropping the search hash index entries before deleting an ibd
file, we build a local array of pages belonging to that tablespace

#define BUF_LRU_IO_TO_UNZIP_FACTOR 50
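
/* Note (added; the original explanatory comment is not part of this
fragment): this factor is used by buf_LRU_evict_from_unzip_LRU() below to
weight the recent I/O rate against the recent decompression rate when
deciding whether eviction should prefer the unzip_LRU list. */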
/** Sampled values buf_LRU_stat_cur.
Not protected by any mutex. Updated by buf_LRU_stat_update(). */
static buf_LRU_stat_t		buf_LRU_stat_arr[BUF_LRU_STAT_N_INTERVAL];

/** Cursor to buf_LRU_stat_arr[] that is updated in a round-robin fashion. */
static ulint			buf_LRU_stat_arr_ind;

UNIV_INTERN buf_LRU_stat_t	buf_LRU_stat_cur;

/** Running sum of past values of buf_LRU_stat_cur.
Updated by buf_LRU_stat_update(). Not protected by any mutex. */
UNIV_INTERN buf_LRU_stat_t	buf_LRU_stat_sum;
/** @name Heuristics for detecting index scan @{ */
/** Move blocks to "new" LRU list only if the first access was at
least this many milliseconds ago. Not protected by any mutex or latch. */
UNIV_INTERN uint	buf_LRU_old_threshold_ms;
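
/* Example (an added, illustrative note): if this is set to 1000 (via
innodb_old_blocks_time), a page whose first access was less than one second
ago stays in the "old" sublist even when it is accessed again, so a single
sequential scan cannot push the working set out of the "new" sublist. */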
/******************************************************************//**
Takes a block out of the LRU list and page hash table.
If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
the object will be freed and buf_pool->zip_mutex will be released.

If a compressed page or a compressed-only block descriptor is freed,
other compressed pages or compressed-only block descriptors may be
@return TRUE if should use unzip_LRU */
buf_LRU_evict_from_unzip_LRU(
/*=========================*/
	buf_pool_t*	buf_pool)
	ut_ad(buf_pool_mutex_own(buf_pool));

	/* If the unzip_LRU list is empty, we can only use the LRU. */
	if (UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0) {
		if (num_entries < BUF_LRU_DROP_SEARCH_HASH_SIZE) {

		/* Array full. We release the buf_pool->mutex to
		obey the latching order. */
		buf_pool_mutex_exit(buf_pool);

		buf_LRU_drop_page_hash_batch(
			id, zip_size, page_arr, num_entries);

		buf_pool_mutex_enter(buf_pool);

		mutex_exit(block_mutex);
/******************************************************************//**
Invalidates all pages belonging to a given tablespace inside a specific
buffer pool instance when we are deleting the data file(s) of that
tablespace. */
buf_LRU_invalidate_tablespace_buf_pool_instance(
/*============================================*/
	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
	ulint		id)		/*!< in: space id */
	buf_page_t*	bpage;

	buf_pool_mutex_enter(buf_pool);
	all_freed = TRUE;

	bpage = UT_LIST_GET_LAST(buf_pool->LRU);

	while (bpage != NULL) {
		buf_page_t*	prev_bpage;
		ibool		prev_bpage_buf_fix = FALSE;

		ut_a(buf_page_in_file(bpage));

		prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
		/* bpage->space and bpage->io_fix are protected by
		buf_pool->mutex and block_mutex. It is safe to check
		them while holding buf_pool->mutex only. */

		if (buf_page_get_space(bpage) != id) {
			/* Skip this block, as it does not belong to
			the space that is being invalidated. */
		} else if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
			/* We cannot remove this page during this scan
			yet; maybe the system is currently reading it
			in, or flushing the modifications to the file */

			mutex_t* block_mutex = buf_page_get_mutex(bpage);
			mutex_enter(block_mutex);

			if (bpage->buf_fix_count > 0) {

				/* We cannot remove this page during
				this scan yet; maybe the system is
				(ulong) buf_page_get_page_no(bpage));
			if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {
				/* This is a compressed-only block
				descriptor. Ensure that prev_bpage
				cannot be relocated when bpage is freed. */
				if (UNIV_LIKELY(prev_bpage != NULL)) {
					switch (buf_page_get_state(
				case BUF_BLOCK_FILE_PAGE:
					/* Descriptors of uncompressed
					blocks will not be relocated,
					because we are holding the
				case BUF_BLOCK_ZIP_PAGE:
				case BUF_BLOCK_ZIP_DIRTY:
					/* Descriptors of compressed-
					only blocks can be relocated,
					unless they are buffer-fixed.
					Because both bpage and
					prev_bpage are protected by
					buf_pool_zip_mutex, it is
					not necessary to acquire
					ut_ad(&buf_pool->zip_mutex
					ut_ad(mutex_own(block_mutex));
					prev_bpage_buf_fix = TRUE;
					prev_bpage->buf_fix_count++;
			} else if (((buf_block_t*) bpage)->is_hashed) {

				buf_pool_mutex_exit(buf_pool);

				zip_size = buf_page_get_zip_size(bpage);
				page_no = buf_page_get_page_no(bpage);
				/* The block_mutex should have been
				released by buf_LRU_block_remove_hashed_page()
				when it returns BUF_BLOCK_ZIP_FREE. */
				ut_ad(block_mutex == &buf_pool->zip_mutex);
				ut_ad(!mutex_own(block_mutex));

				if (prev_bpage_buf_fix) {
					/* We temporarily buffer-fixed
					buf_buddy_free() could not
					relocate it, in case it was a
					compressed-only block

					mutex_enter(block_mutex);
					ut_ad(prev_bpage->buf_fix_count > 0);
					prev_bpage->buf_fix_count--;
					mutex_exit(block_mutex);

				goto next_page_no_mutex;

			mutex_exit(block_mutex);

		bpage = prev_bpage;

	buf_pool_mutex_exit(buf_pool);

	if (!all_freed) {
		os_thread_sleep(20000);
/******************************************************************//**
Invalidates all pages belonging to a given tablespace when we are deleting
the data file(s) of that tablespace. */
buf_LRU_invalidate_tablespace(
/*==========================*/
	ulint	id)	/*!< in: space id */
	/* Before we attempt to drop pages one by one we first
	attempt to drop page hash index entries in batches to make
	it more efficient. The batching attempt is a best effort
	attempt and does not guarantee that all pages hash entries
	will be dropped. We get rid of remaining page hash entries

	for (i = 0; i < srv_buf_pool_instances; i++) {
		buf_pool_t*	buf_pool;

		buf_pool = buf_pool_from_array(i);
		buf_LRU_drop_page_hash_for_tablespace(buf_pool, id);
		buf_LRU_invalidate_tablespace_buf_pool_instance(buf_pool, id);
/********************************************************************//**
	buf_page_t*	bpage)	/*!< in: pointer to the block in question */
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE);
	/* Find the first successor of bpage in the LRU list

buf_LRU_free_from_unzip_LRU_list(
/*=============================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	ulint		n_iterations)	/*!< in: how many times this has
					been called repeatedly without
					result: a high value means that
					we should search farther; we will
					search n_iterations / 5 of the
					unzip_LRU list, or nothing if
	buf_block_t*	block;

	ut_ad(buf_pool_mutex_own(buf_pool));

	/* Theoretically it should be much easier to find a victim
	from unzip_LRU as we can choose even a dirty block (as we'll
buf_LRU_free_from_common_LRU_list(
/*==============================*/
	buf_pool_t*	buf_pool,
			/*!< in: how many times this has been called
			repeatedly without result: a high value means
			that we should search farther; if
			n_iterations < 10, then we search

	ut_ad(bpage->in_LRU_list);

	mutex_enter(block_mutex);
	accessed = buf_page_is_accessed(bpage);
	freed = buf_LRU_free_block(bpage, TRUE, NULL);
	mutex_exit(block_mutex);

	case BUF_LRU_FREED:
		/* Keep track of pages that are evicted without
		ever being accessed. This gives us a measure of
		the effectiveness of readahead */
		++buf_pool->stat.n_ra_pages_evicted;

	case BUF_LRU_NOT_FREED:
buf_LRU_search_and_free_block(
/*==========================*/
	buf_pool_t*	buf_pool,
			/*!< in: buffer pool instance */
			/*!< in: how many times this has been called
			repeatedly without result: a high value means
			that we should search farther; if
			n_iterations < 10, then we search
buf_LRU_try_free_flushed_blocks(
/*============================*/
	buf_pool_t*	buf_pool)	/*!< in: buffer pool instance */
	if (buf_pool == NULL) {

		for (i = 0; i < srv_buf_pool_instances; i++) {
			buf_pool = buf_pool_from_array(i);
			buf_LRU_try_free_flushed_blocks(buf_pool);

		buf_pool_mutex_enter(buf_pool);

		while (buf_pool->LRU_flush_ended > 0) {

			buf_pool_mutex_exit(buf_pool);

			buf_LRU_search_and_free_block(buf_pool, 1);

			buf_pool_mutex_enter(buf_pool);

		buf_pool_mutex_exit(buf_pool);
/******************************************************************//**
Returns TRUE if less than 25 % of the buffer pool in any instance is
available. This can be used in heuristics to prevent huge transactions
eating up the whole buffer pool for their locks.
@return TRUE if less than 25 % of buffer pool left */
buf_LRU_buf_pool_running_out(void)
/*==============================*/
	for (i = 0; i < srv_buf_pool_instances && !ret; i++) {
		buf_pool_t*	buf_pool;

		buf_pool = buf_pool_from_array(i);

		buf_pool_mutex_enter(buf_pool);

		if (!recv_recovery_on
		    && UT_LIST_GET_LEN(buf_pool->free)
		    + UT_LIST_GET_LEN(buf_pool->LRU)
		    < buf_pool->curr_size / 4) {

		buf_pool_mutex_exit(buf_pool);
@return a free control block, or NULL if the buf_block->free list is empty */
buf_LRU_get_free_only(
/*==================*/
	buf_pool_t*	buf_pool)
	buf_block_t*	block;

	ut_ad(buf_pool_mutex_own(buf_pool));

	block = (buf_block_t*) UT_LIST_GET_FIRST(buf_pool->free);

	ut_ad(block->page.in_free_list);
	ut_d(block->page.in_free_list = FALSE);
	ut_ad(!block->page.in_flush_list);
buf_LRU_get_free_block(
/*===================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	ulint		zip_size)	/*!< in: compressed page size in bytes,
					or 0 if uncompressed tablespace */
	buf_block_t*	block = NULL;
	if (UNIV_UNLIKELY(zip_size)) {

		page_zip_set_size(&block->page.zip, zip_size);

		block->page.zip.data = static_cast<unsigned char *>(buf_buddy_alloc(
			buf_pool, zip_size, &lru));

		UNIV_MEM_DESC(block->page.zip.data, zip_size, block);

		page_zip_set_size(&block->page.zip, 0);
		block->page.zip.data = NULL;

	buf_pool_mutex_exit(buf_pool);

	if (started_monitor) {
		srv_print_innodb_monitor = mon_value_was;
	/* If no block was in the free list, search from the end of the LRU
	list and try to free a block there */

	buf_pool_mutex_exit(buf_pool);

	freed = buf_LRU_search_and_free_block(buf_pool, n_iterations);

	/* No free block was found: try to flush the LRU list */

	buf_flush_free_margin(buf_pool);
	++srv_buf_pool_wait_free;

	os_aio_simulated_wake_handler_threads();

	buf_pool_mutex_enter(buf_pool);

	if (buf_pool->LRU_flush_ended > 0) {
		/* We have written pages in an LRU flush. To make the insert
		buffer more efficient, we try to move these pages to the free

		buf_pool_mutex_exit(buf_pool);

		buf_LRU_try_free_flushed_blocks(buf_pool);

		buf_pool_mutex_exit(buf_pool);

	if (n_iterations > 10) {
is inside the allowed limits. */
buf_LRU_old_adjust_len(
/*===================*/
	buf_pool_t*	buf_pool)	/*!< in: buffer pool instance */
	ut_a(buf_pool->LRU_old);
	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_ad(buf_pool->LRU_old_ratio >= BUF_LRU_OLD_RATIO_MIN);
	ut_ad(buf_pool->LRU_old_ratio <= BUF_LRU_OLD_RATIO_MAX);
#if BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5)
# error "BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5)"
#endif
#ifdef UNIV_LRU_DEBUG
	/* buf_pool->LRU_old must be the first item in the LRU list
	    || UT_LIST_GET_NEXT(LRU, buf_pool->LRU_old)->old);
#endif /* UNIV_LRU_DEBUG */

	old_len = buf_pool->LRU_old_len;
	new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU)
			 * buf_pool->LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV,
			 UT_LIST_GET_LEN(buf_pool->LRU)
			 - (BUF_LRU_OLD_TOLERANCE
			    + BUF_LRU_NON_OLD_MIN_LEN));
		buf_page_t*	LRU_old = buf_pool->LRU_old;

		ut_ad(LRU_old->in_LRU_list);
#ifdef UNIV_LRU_DEBUG
		ut_a(buf_pool->LRU_old->old);
#endif /* UNIV_LRU_DEBUG */

		/* Update the LRU_old pointer if necessary */

		if (old_len + BUF_LRU_OLD_TOLERANCE < new_len) {

			buf_pool->LRU_old = LRU_old = UT_LIST_GET_PREV(
				LRU, buf_pool->LRU_old);
#ifdef UNIV_LRU_DEBUG
			ut_a(!LRU_old->old);
#endif /* UNIV_LRU_DEBUG */
			old_len = ++buf_pool->LRU_old_len;
			buf_page_set_old(LRU_old, TRUE);

		} else if (old_len > new_len + BUF_LRU_OLD_TOLERANCE) {

			buf_pool->LRU_old = UT_LIST_GET_NEXT(LRU, LRU_old);
			old_len = --buf_pool->LRU_old_len;
			buf_page_set_old(LRU_old, FALSE);
called when the LRU list grows to BUF_LRU_OLD_MIN_LEN length. */
buf_LRU_old_init(
/*=============*/
	buf_pool_t*	buf_pool)
	buf_page_t*	bpage;

	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN);

	/* We first initialize all blocks in the LRU list as old and then use
	the adjust function to move the LRU_old pointer to the right

	for (bpage = UT_LIST_GET_LAST(buf_pool->LRU); bpage != NULL;
	     bpage = UT_LIST_GET_PREV(LRU, bpage)) {
		ut_ad(bpage->in_LRU_list);
		ut_ad(buf_page_in_file(bpage));
		/* This loop temporarily violates the
		assertions of buf_page_set_old(). */

	buf_pool->LRU_old = UT_LIST_GET_FIRST(buf_pool->LRU);
	buf_pool->LRU_old_len = UT_LIST_GET_LEN(buf_pool->LRU);

	buf_LRU_old_adjust_len(buf_pool);
/******************************************************************//**
/*=================================*/
	buf_page_t*	bpage)	/*!< in/out: control block */
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

	ut_ad(buf_pool);
	ut_ad(buf_page_in_file(bpage));
	ut_ad(buf_pool_mutex_own(buf_pool));

	if (buf_page_belongs_to_unzip_LRU(bpage)) {
		buf_block_t*	block = (buf_block_t*) bpage;
	if (UNIV_UNLIKELY(bpage == buf_pool->LRU_old)) {

		/* Below: the previous block is guaranteed to exist,
		because the LRU_old pointer is only allowed to differ
		by BUF_LRU_OLD_TOLERANCE from strict
		buf_pool->LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV of the LRU

		buf_page_t*	prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
#ifdef UNIV_LRU_DEBUG
		ut_a(!prev_bpage->old);
#endif /* UNIV_LRU_DEBUG */
		buf_pool->LRU_old = prev_bpage;
		buf_page_set_old(prev_bpage, TRUE);

		buf_pool->LRU_old_len++;

	buf_unzip_LRU_remove_block_if_needed(bpage);

	/* If the LRU list is so short that LRU_old is not defined,
	clear the "old" flags and return */

	if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN) {

		for (bpage = UT_LIST_GET_FIRST(buf_pool->LRU); bpage != NULL;
		     bpage = UT_LIST_GET_NEXT(LRU, bpage)) {
			/* This loop temporarily violates the
			assertions of buf_page_set_old(). */

		buf_pool->LRU_old = NULL;
		buf_pool->LRU_old_len = 0;
	ibool		old)	/*!< in: TRUE if should be put to the end
				of the list, else put to the start */
	buf_pool_t*	buf_pool = buf_pool_from_block(block);

	ut_ad(buf_pool);
	ut_ad(buf_pool_mutex_own(buf_pool));

	ut_a(buf_page_belongs_to_unzip_LRU(&block->page));
/*=========================*/
	buf_page_t*	bpage)	/*!< in: control block */
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

	ut_ad(buf_pool);
	ut_ad(buf_pool_mutex_own(buf_pool));

	ut_a(buf_page_in_file(bpage));

	ut_ad(!bpage->in_LRU_list);
	UT_LIST_ADD_LAST(LRU, buf_pool->LRU, bpage);
	ut_d(bpage->in_LRU_list = TRUE);

	if (UT_LIST_GET_LEN(buf_pool->LRU) > BUF_LRU_OLD_MIN_LEN) {

		ut_ad(buf_pool->LRU_old);

		/* Adjust the length of the old block list if necessary */

		buf_page_set_old(bpage, TRUE);
		buf_pool->LRU_old_len++;
		buf_LRU_old_adjust_len(buf_pool);

	} else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) {

		/* The LRU list is now long enough for LRU_old to become
		defined: init it */

		buf_LRU_old_init(buf_pool);

		buf_page_set_old(bpage, buf_pool->LRU_old != NULL);

	/* If this is a zipped block with decompressed frame as well
			LRU list is very short, the block is added to
			the start, regardless of this parameter */
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

	ut_ad(buf_pool);
	ut_ad(buf_pool_mutex_own(buf_pool));

	ut_a(buf_page_in_file(bpage));
	ut_ad(!bpage->in_LRU_list);

		UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU, buf_pool->LRU_old,

		buf_pool->LRU_old_len++;

	ut_d(bpage->in_LRU_list = TRUE);

	if (UT_LIST_GET_LEN(buf_pool->LRU) > BUF_LRU_OLD_MIN_LEN) {

		ut_ad(buf_pool->LRU_old);

		/* Adjust the length of the old block list if necessary */

		buf_page_set_old(bpage, old);
		buf_LRU_old_adjust_len(buf_pool);

	} else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) {

		/* The LRU list is now long enough for LRU_old to become
		defined: init it */

		buf_LRU_old_init(buf_pool);

		buf_page_set_old(bpage, buf_pool->LRU_old != NULL);

	/* If this is a zipped block with decompressed frame as well
/*=====================*/
	buf_page_t*	bpage)	/*!< in: control block */
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

	ut_ad(buf_pool_mutex_own(buf_pool));

	buf_pool->stat.n_pages_made_young++;

	buf_LRU_remove_block(bpage);
	buf_LRU_add_block_low(bpage, FALSE);
Try to free a block. If bpage is a descriptor of a compressed-only
page, the descriptor object will be freed as well.

NOTE: If this function returns BUF_LRU_FREED, it will temporarily
release buf_pool->mutex. Furthermore, the page frame will no longer be
accessible via bpage.

The caller must hold buf_pool->mutex and buf_page_get_mutex(bpage) and
release these two mutexes after the call. No other
buf_page_get_mutex() may be held when calling this function.
@return BUF_LRU_FREED if freed, BUF_LRU_CANNOT_RELOCATE or
	was temporarily released, or NULL */
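
/* Typical caller pattern (an illustrative sketch based on
buf_LRU_free_from_common_LRU_list() above, not an additional API): with
buf_pool->mutex already held,

	mutex_enter(block_mutex);
	freed = buf_LRU_free_block(bpage, TRUE, NULL);
	mutex_exit(block_mutex);

and both mutexes are released by the caller afterwards. */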
	buf_page_t*	b = NULL;
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
	mutex_t*	block_mutex = buf_page_get_mutex(bpage);

	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_ad(mutex_own(block_mutex));
	ut_ad(buf_page_in_file(bpage));
	ut_ad(bpage->in_LRU_list);
	ut_ad(!bpage->in_flush_list == !bpage->oldest_modification);
#if UNIV_WORD_SIZE == 4
	/* On 32-bit systems, there is no padding in buf_page_t. On
	other systems, Valgrind could complain about uninitialized pad
	UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage);

	if (!buf_page_can_relocate(bpage)) {

		If it cannot be allocated (without freeing a block
		from the LRU list), refuse to free bpage. */

		buf_pool_mutex_exit_forbid(buf_pool);
		b = static_cast<buf_page_t *>(buf_buddy_alloc(buf_pool, sizeof *b, NULL));
		buf_pool_mutex_exit_allow(buf_pool);

		if (UNIV_UNLIKELY(!b)) {
			return(BUF_LRU_CANNOT_RELOCATE);
		ut_a(bpage->buf_fix_count == 0);

			buf_page_t*	prev_b = UT_LIST_GET_PREV(LRU, b);

			const ulint	fold = buf_page_address_fold(
				bpage->space, bpage->offset);

			hash_b = buf_page_hash_get_low(
				buf_pool, bpage->space, bpage->offset, fold);

			b->state = b->oldest_modification
				? BUF_BLOCK_ZIP_DIRTY
			ut_ad(prev_b->in_LRU_list);
			ut_ad(buf_page_in_file(prev_b));
#if UNIV_WORD_SIZE == 4
			/* On 32-bit systems, there is no
			padding in buf_page_t. On other
			systems, Valgrind could complain about
			uninitialized pad bytes. */
			UNIV_MEM_ASSERT_RW(prev_b, sizeof *prev_b);

			UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU,

				buf_pool->LRU_old = b;

			lru_len = UT_LIST_GET_LEN(buf_pool->LRU);

				ut_ad(buf_pool->LRU_old);

				/* Adjust the length of the
				old block list if necessary */
				buf_LRU_old_adjust_len(buf_pool);

			} else if (lru_len == BUF_LRU_OLD_MIN_LEN) {
				/* The LRU list is now long
				enough for LRU_old to become
				defined: init it */
				buf_LRU_old_init(buf_pool);

#ifdef UNIV_LRU_DEBUG
			/* Check that the "old" flag is consistent
			in the block and its neighbours. */
			buf_page_set_old(b, buf_page_is_old(b));
#endif /* UNIV_LRU_DEBUG */
			ut_d(b->in_LRU_list = FALSE);
			buf_LRU_add_block_low(b, buf_page_is_old(b));

		if (b->state == BUF_BLOCK_ZIP_PAGE) {
			buf_LRU_insert_zip_clean(b);

			/* Relocate on buf_pool->flush_list. */
			buf_flush_relocate_on_flush_list(bpage, b);

		bpage->zip.data = NULL;
			: BUF_NO_CHECKSUM_MAGIC);

		buf_pool_mutex_enter(buf_pool);
		mutex_enter(block_mutex);

			mutex_enter(&buf_pool->zip_mutex);
			b->buf_fix_count--;
			buf_page_set_io_fix(b, BUF_IO_NONE);
			mutex_exit(&buf_pool->zip_mutex);

		buf_LRU_block_free_hashed_page((buf_block_t*) bpage);
/*=============================*/
	buf_block_t*	block)	/*!< in: block, must not contain a file page */
	buf_pool_t*	buf_pool = buf_pool_from_block(block);

	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_ad(mutex_own(&block->mutex));

	switch (buf_block_get_state(block)) {

		block->page.zip.data = NULL;
		mutex_exit(&block->mutex);
		buf_pool_mutex_exit_forbid(buf_pool);

		buf_buddy_free(
			buf_pool, data, page_zip_get_size(&block->page.zip));

		buf_pool_mutex_exit_allow(buf_pool);
		mutex_enter(&block->mutex);
		page_zip_set_size(&block->page.zip, 0);
/******************************************************************//**
Takes a block out of the LRU list and page hash table.
If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
the object will be freed and buf_pool->zip_mutex will be released.

If a compressed page or a compressed-only block descriptor is freed,
other compressed pages or compressed-only block descriptors may be
	ibool		zip)	/*!< in: TRUE if should remove also the
				compressed page of an uncompressed page */
	const buf_page_t*	hashed_bpage;
	buf_pool_t*		buf_pool = buf_pool_from_bpage(bpage);

	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));

	ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
	ut_a(bpage->buf_fix_count == 0);

#if UNIV_WORD_SIZE == 4
	/* On 32-bit systems, there is no padding in
	buf_page_t. On other systems, Valgrind could complain
	about uninitialized pad bytes. */
	UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage);

	buf_LRU_remove_block(bpage);
	fold = buf_page_address_fold(bpage->space, bpage->offset);
	hashed_bpage = buf_page_hash_get_low(
		buf_pool, bpage->space, bpage->offset, fold);

	if (UNIV_UNLIKELY(bpage != hashed_bpage)) {
		fprintf(stderr,

	ut_ad(!bpage->in_zip_hash);
	ut_ad(bpage->in_page_hash);
	ut_d(bpage->in_page_hash = FALSE);
	HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, bpage);
	switch (buf_page_get_state(bpage)) {
	case BUF_BLOCK_ZIP_PAGE:
		ut_ad(!bpage->in_free_list);

		UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);

		mutex_exit(&buf_pool->zip_mutex);
		buf_pool_mutex_exit_forbid(buf_pool);

		buf_buddy_free(
			buf_pool, bpage->zip.data,
			page_zip_get_size(&bpage->zip));

		buf_buddy_free(buf_pool, bpage, sizeof(*bpage));
		buf_pool_mutex_exit_allow(buf_pool);

		UNIV_MEM_UNDESC(bpage);
		return(BUF_BLOCK_ZIP_FREE);
			ut_ad(!bpage->in_flush_list);
			ut_ad(!bpage->in_LRU_list);
			mutex_exit(&((buf_block_t*) bpage)->mutex);
			buf_pool_mutex_exit_forbid(buf_pool);

			buf_buddy_free(
				buf_pool, data,
				page_zip_get_size(&bpage->zip));

			buf_pool_mutex_exit_allow(buf_pool);
			mutex_enter(&((buf_block_t*) bpage)->mutex);
			page_zip_set_size(&bpage->zip, 0);
	buf_block_t*	block)	/*!< in: block, must contain a file page and
				be in a state where it can be freed */
	buf_pool_t*	buf_pool = buf_pool_from_block(block);

	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_ad(mutex_own(&block->mutex));

	buf_block_set_state(block, BUF_BLOCK_MEMORY);

	buf_LRU_block_free_non_file_page(block);
/**********************************************************************//**
Updates buf_pool->LRU_old_ratio for one buffer pool instance.
@return updated old_pct */
buf_LRU_old_ratio_update_instance(
/*==============================*/
	buf_pool_t*	buf_pool,/*!< in: buffer pool instance */
	uint		old_pct,/*!< in: Reserve this percentage of
				the buffer pool for "old" blocks. */
	ibool		adjust)	/*!< in: TRUE=adjust the LRU list;
				FALSE=just assign buf_pool->LRU_old_ratio
				during the initialization of InnoDB */
	ratio = old_pct * BUF_LRU_OLD_RATIO_DIV / 100;
	if (ratio < BUF_LRU_OLD_RATIO_MIN) {
		ratio = BUF_LRU_OLD_RATIO_MIN;
	} else if (ratio > BUF_LRU_OLD_RATIO_MAX) {
		ratio = BUF_LRU_OLD_RATIO_MAX;

	buf_pool_mutex_enter(buf_pool);

	if (ratio != buf_pool->LRU_old_ratio) {
		buf_pool->LRU_old_ratio = ratio;

		if (UT_LIST_GET_LEN(buf_pool->LRU)
		    >= BUF_LRU_OLD_MIN_LEN) {

			buf_LRU_old_adjust_len(buf_pool);

	buf_pool_mutex_exit(buf_pool);

		buf_pool->LRU_old_ratio = ratio;

	ratio = old_pct * BUF_LRU_OLD_RATIO_DIV / 100 */
	return((uint) (ratio * 100 / (double) BUF_LRU_OLD_RATIO_DIV + 0.5));
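
/* Worked example (added, illustrative): with old_pct = 37 the clamped ratio
is 37 * BUF_LRU_OLD_RATIO_DIV / 100 (378 when BUF_LRU_OLD_RATIO_DIV is 1024),
and the value reported back is (uint) (378 * 100 / 1024.0 + 0.5) == 37, i.e.
the percentage round-trips through the fixed-point representation. */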
/**********************************************************************//**
Updates buf_pool->LRU_old_ratio.
@return updated old_pct */
buf_LRU_old_ratio_update(
/*=====================*/
	uint	old_pct,/*!< in: Reserve this percentage of
			the buffer pool for "old" blocks. */
	ibool	adjust)	/*!< in: TRUE=adjust the LRU list;
			FALSE=just assign buf_pool->LRU_old_ratio
			during the initialization of InnoDB */
	ulint	new_ratio = 0;

	for (i = 0; i < srv_buf_pool_instances; i++) {
		buf_pool_t*	buf_pool;

		buf_pool = buf_pool_from_array(i);

		new_ratio = buf_LRU_old_ratio_update_instance(
			buf_pool, old_pct, adjust);
/********************************************************************//**
Update the historical stats that we are collecting for LRU eviction
policy at the end of each interval. */
buf_LRU_stat_update(void)
/*=====================*/
	buf_LRU_stat_t*	item;
	buf_pool_t*	buf_pool;
	ibool		evict_started = FALSE;

	/* If we haven't started eviction yet then don't update stats. */
	for (i = 0; i < srv_buf_pool_instances; i++) {

		buf_pool = buf_pool_from_array(i);

		if (buf_pool->freed_page_clock != 0) {
			evict_started = TRUE;

	if (!evict_started) {
		goto func_exit;

	/* Update the index. */
	item = &buf_LRU_stat_arr[buf_LRU_stat_arr_ind];
	buf_LRU_stat_arr_ind++;
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/**********************************************************************//**
Validates the LRU list for one buffer pool instance. */
buf_LRU_validate_instance(
/*======================*/
	buf_pool_t*	buf_pool)
	buf_page_t*	bpage;
	buf_block_t*	block;

	ut_ad(buf_pool);
	buf_pool_mutex_enter(buf_pool);

	if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) {

		ut_a(buf_pool->LRU_old);
		old_len = buf_pool->LRU_old_len;
		new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU)
				 * buf_pool->LRU_old_ratio
				 / BUF_LRU_OLD_RATIO_DIV,
				 UT_LIST_GET_LEN(buf_pool->LRU)
				 - (BUF_LRU_OLD_TOLERANCE
				    + BUF_LRU_NON_OLD_MIN_LEN));
		ut_a(old_len >= new_len - BUF_LRU_OLD_TOLERANCE);
		ut_a(old_len <= new_len + BUF_LRU_OLD_TOLERANCE);
		if (buf_page_is_old(bpage)) {
			const buf_page_t*	prev
				= UT_LIST_GET_PREV(LRU, bpage);
			const buf_page_t*	next
				= UT_LIST_GET_NEXT(LRU, bpage);

				ut_a(buf_pool->LRU_old == bpage);

			ut_a(!prev || buf_page_is_old(prev));

			ut_a(!next || buf_page_is_old(next));

		bpage = UT_LIST_GET_NEXT(LRU, bpage);

	ut_a(buf_pool->LRU_old_len == old_len);

	UT_LIST_VALIDATE(list, buf_page_t, buf_pool->free,
			 ut_ad(ut_list_node_313->in_free_list));
		ut_a(buf_page_belongs_to_unzip_LRU(&block->page));

	buf_pool_mutex_exit(buf_pool);

/**********************************************************************//**
Validates the LRU list.
buf_LRU_validate(void)
/*==================*/
	for (i = 0; i < srv_buf_pool_instances; i++) {
		buf_pool_t*	buf_pool;

		buf_pool = buf_pool_from_array(i);
		buf_LRU_validate_instance(buf_pool);

#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
#if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/**********************************************************************//**
Prints the LRU list for one buffer pool instance. */
buf_LRU_print_instance(
/*===================*/
	buf_pool_t*	buf_pool)
	const buf_page_t*	bpage;

	ut_ad(buf_pool);
	buf_pool_mutex_enter(buf_pool);

	bpage = UT_LIST_GET_FIRST(buf_pool->LRU);

	while (bpage != NULL) {

		mutex_enter(buf_page_get_mutex(bpage));
		fprintf(stderr, "BLOCK space %lu page %lu ",
			(ulong) buf_page_get_space(bpage),
			(ulong) buf_page_get_page_no(bpage));

			const byte*	frame;
		case BUF_BLOCK_FILE_PAGE:
			frame = buf_block_get_frame((buf_block_t*) bpage);
			fprintf(stderr, "\ntype %lu"
				(ulong) fil_page_get_type(frame),
				(ullint) btr_page_get_index_id(frame));

		case BUF_BLOCK_ZIP_PAGE:
			frame = bpage->zip.data;
			fprintf(stderr, "\ntype %lu size %lu"
				(ulong) fil_page_get_type(frame),
				(ulong) buf_page_get_zip_size(bpage),
				(ullint) btr_page_get_index_id(frame));

			fprintf(stderr, "\n!state %lu!\n",
				(ulong) buf_page_get_state(bpage));

		mutex_exit(buf_page_get_mutex(bpage));

		bpage = UT_LIST_GET_NEXT(LRU, bpage);

	buf_pool_mutex_exit(buf_pool);
/**********************************************************************//**
Prints the LRU list. */
	buf_pool_t*	buf_pool;

	for (i = 0; i < srv_buf_pool_instances; i++) {

		buf_pool = buf_pool_from_array(i);
		buf_LRU_print_instance(buf_pool);

#endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */