 #include "srv0srv.h"

 /** The number of blocks from the LRU_old pointer onward, including
-the block pointed to, must be buf_LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV
+the block pointed to, must be buf_pool->LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV
 of the whole LRU list length, except that the tolerance defined below
 is allowed. Note that the tolerance must be small enough such that for
 even the BUF_LRU_OLD_MIN_LEN long LRU list, the LRU_old pointer is not
 #define BUF_LRU_IO_TO_UNZIP_FACTOR 50

 /** Sampled values buf_LRU_stat_cur.
-Protected by buf_pool_mutex. Updated by buf_LRU_stat_update(). */
+Not protected by any mutex. Updated by buf_LRU_stat_update(). */
 static buf_LRU_stat_t	buf_LRU_stat_arr[BUF_LRU_STAT_N_INTERVAL];
 /** Cursor to buf_LRU_stat_arr[] that is updated in a round-robin fashion. */
 static ulint		buf_LRU_stat_arr_ind;

 UNIV_INTERN buf_LRU_stat_t	buf_LRU_stat_cur;

 /** Running sum of past values of buf_LRU_stat_cur.
-Updated by buf_LRU_stat_update(). Protected by buf_pool_mutex. */
+Updated by buf_LRU_stat_update(). Not protected by any mutex. */
 UNIV_INTERN buf_LRU_stat_t	buf_LRU_stat_sum;
 /** @name Heuristics for detecting index scan @{ */
-/** Reserve this much/BUF_LRU_OLD_RATIO_DIV of the buffer pool for
-"old" blocks. Protected by buf_pool_mutex. */
-UNIV_INTERN uint	buf_LRU_old_ratio;
 /** Move blocks to "new" LRU list only if the first access was at
 least this many milliseconds ago. Not protected by any mutex or latch. */
 UNIV_INTERN uint	buf_LRU_old_threshold_ms;
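/* Illustrative sketch, not part of the patch: how the
buf_LRU_old_threshold_ms knob above is meant to be applied when
deciding whether a page may leave the "old" sublist.  The millisecond
bookkeeping shown here is an assumption for illustration; in the tree
the check is made against the page's recorded first-access time. */
static int
buf_LRU_example_old_enough(
	unsigned long	first_access_ms,/* when the page was first accessed */
	unsigned long	now_ms)		/* current time in milliseconds */
{
	/* Move to the "new" end only if the first access is at least
	buf_LRU_old_threshold_ms milliseconds in the past. */
	return(now_ms - first_access_ms >= buf_LRU_old_threshold_ms);
}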
 /******************************************************************//**
 Takes a block out of the LRU list and page hash table.
 If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
-the object will be freed and buf_pool_zip_mutex will be released.
+the object will be freed and buf_pool->zip_mutex will be released.
 If a compressed page or a compressed-only block descriptor is freed,
 other compressed pages or compressed-only block descriptors may be
 @return TRUE if should use unzip_LRU */
-buf_LRU_evict_from_unzip_LRU(void)
-/*==============================*/
+buf_LRU_evict_from_unzip_LRU(
+/*=========================*/
+	buf_pool_t*	buf_pool)

-	ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own(buf_pool));

 	/* If the unzip_LRU list is empty, we can only use the LRU. */
 	if (UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0) {
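/* Illustrative sketch, not part of the patch: the shape of the
decision buf_LRU_evict_from_unzip_LRU() makes further down.  It weighs
the recent I/O rate against the recent decompression rate using
BUF_LRU_IO_TO_UNZIP_FACTOR; the averaged inputs are assumptions for
illustration. */
static int
buf_LRU_example_use_unzip_LRU(
	unsigned long	io_avg,		/* page I/Os per interval */
	unsigned long	unzip_avg)	/* decompressions per interval */
{
	/* If the workload is I/O bound, evicting an uncompressed
	frame from unzip_LRU is cheap: the compressed copy stays in
	the pool and can be re-inflated without a disk read. */
	return(unzip_avg <= io_avg * BUF_LRU_IO_TO_UNZIP_FACTOR);
}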
 buf_LRU_drop_page_hash_for_tablespace(
 /*==================================*/
-	ulint		id)	/*!< in: space id */
+	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
+	ulint		id)		/*!< in: space id */

 	buf_page_t*	bpage;
 		if (num_entries < BUF_LRU_DROP_SEARCH_HASH_SIZE) {

-		/* Array full. We release the buf_pool_mutex to
+		/* Array full. We release the buf_pool->mutex to
 		obey the latching order. */
-		buf_pool_mutex_exit();
+		buf_pool_mutex_exit(buf_pool);

-		buf_LRU_drop_page_hash_batch(id, zip_size, page_arr,
-					     num_entries);
+		buf_LRU_drop_page_hash_batch(
+			id, zip_size, page_arr, num_entries);

-		buf_pool_mutex_enter();
+		buf_pool_mutex_enter(buf_pool);

 	mutex_exit(block_mutex);

-	buf_pool_mutex_exit();
+	buf_pool_mutex_exit(buf_pool);

 	/* Drop any remaining batch of search hashed pages. */
 	buf_LRU_drop_page_hash_batch(id, zip_size, page_arr, num_entries);
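/* Illustrative sketch, not part of the patch: the batching scheme
used above.  Page numbers are collected under the buffer pool mutex
and the adaptive hash entries are dropped one whole batch at a time,
so the mutex is released once per BUF_LRU_DROP_SEARCH_HASH_SIZE pages
instead of once per page.  The types are simplified assumptions. */
static void
buf_LRU_example_collect(
	unsigned long*	page_arr,	/* out: collected page numbers */
	unsigned long*	num_entries,	/* in/out: current fill level */
	unsigned long	page_no,	/* page whose hash entry to drop */
	unsigned long	batch_size)	/* BUF_LRU_DROP_SEARCH_HASH_SIZE */
{
	page_arr[(*num_entries)++] = page_no;

	if (*num_entries < batch_size) {
		return;	/* keep collecting; stay under the mutex */
	}

	/* Array full: the caller releases buf_pool->mutex, calls
	buf_LRU_drop_page_hash_batch(), retakes the mutex and resets
	the counter. */
	*num_entries = 0;
}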
 /******************************************************************//**
-Invalidates all pages belonging to a given tablespace when we are deleting
-the data file(s) of that tablespace. */
-buf_LRU_invalidate_tablespace(
-/*==========================*/
-	ulint	id)	/*!< in: space id */
+Invalidates all pages belonging to a given tablespace inside a specific
+buffer pool instance when we are deleting the data file(s) of that
+tablespace. */
+buf_LRU_invalidate_tablespace_buf_pool_instance(
+/*============================================*/
+	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
+	ulint		id)		/*!< in: space id */
 	buf_page_t*	bpage;

-	/* Before we attempt to drop pages one by one we first
-	attempt to drop page hash index entries in batches to make
-	it more efficient. The batching attempt is a best effort
-	attempt and does not guarantee that all pages hash entries
-	will be dropped. We get rid of remaining page hash entries
-	one by one below. */
-	buf_LRU_drop_page_hash_for_tablespace(id);

-	buf_pool_mutex_enter();
+	buf_pool_mutex_enter(buf_pool);

 	all_freed = TRUE;
 					buf_pool_zip_mutex, it is
 					not necessary to acquire
 					further mutexes. */
-					ut_ad(&buf_pool_zip_mutex
+					ut_ad(&buf_pool->zip_mutex
 					ut_ad(mutex_own(block_mutex));
 					prev_bpage_buf_fix = TRUE;

 			/* The block_mutex should have been
 			released by buf_LRU_block_remove_hashed_page()
 			when it returns BUF_BLOCK_ZIP_FREE. */
-			ut_ad(block_mutex == &buf_pool_zip_mutex);
+			ut_ad(block_mutex == &buf_pool->zip_mutex);
 			ut_ad(!mutex_own(block_mutex));

 			if (prev_bpage_buf_fix) {
+/******************************************************************//**
+Invalidates all pages belonging to a given tablespace when we are deleting
+the data file(s) of that tablespace. */
+buf_LRU_invalidate_tablespace(
+/*==========================*/
+	ulint	id)	/*!< in: space id */

+	/* Before we attempt to drop pages one by one we first
+	attempt to drop page hash index entries in batches to make
+	it more efficient. The batching attempt is a best effort
+	attempt and does not guarantee that all pages hash entries
+	will be dropped. We get rid of remaining page hash entries
+	one by one below. */
+	for (i = 0; i < srv_buf_pool_instances; i++) {
+		buf_pool_t*	buf_pool;
+
+		buf_pool = buf_pool_from_array(i);
+		buf_LRU_drop_page_hash_for_tablespace(buf_pool, id);
+		buf_LRU_invalidate_tablespace_buf_pool_instance(buf_pool, id);
 /********************************************************************//**
 Insert a compressed block into buf_pool->zip_clean in the LRU order. */
 	buf_page_t*	bpage)	/*!< in: pointer to the block in question */
+	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

-	ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own(buf_pool));
 	ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE);

 	/* Find the first successor of bpage in the LRU list
 buf_LRU_free_from_unzip_LRU_list(
 /*=============================*/
-	ulint	n_iterations)	/*!< in: how many times this has been called
-				repeatedly without result: a high value means
-				that we should search farther; we will search
-				n_iterations / 5 of the unzip_LRU list,
-				or nothing if n_iterations >= 5 */
+	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
+	ulint		n_iterations)	/*!< in: how many times this has
+					been called repeatedly without
+					result: a high value means that
+					we should search farther; we will
+					search n_iterations / 5 of the
+					unzip_LRU list, or nothing if
+					n_iterations >= 5 */

 	buf_block_t*	block;

-	ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own(buf_pool));

 	/* Theoretically it should be much easier to find a victim
 	from unzip_LRU as we can choose even a dirty block (as we'll
 buf_LRU_free_from_common_LRU_list(
 /*==============================*/
-	ulint	n_iterations)	/*!< in: how many times this has been called
+	buf_pool_t*	buf_pool,
+	ulint		n_iterations)
+				/*!< in: how many times this has been called
 				repeatedly without result: a high value means
 				that we should search farther; if
 				n_iterations < 10, then we search
 buf_LRU_search_and_free_block(
 /*==========================*/
-	ulint	n_iterations)	/*!< in: how many times this has been called
+	buf_pool_t*	buf_pool,
+			/*!< in: buffer pool instance */
+	ulint		n_iterations)
+			/*!< in: how many times this has been called
 			repeatedly without result: a high value means
 			that we should search farther; if
 			n_iterations < 10, then we search

 	ibool	freed = FALSE;

-	buf_pool_mutex_enter();
+	buf_pool_mutex_enter(buf_pool);

-	freed = buf_LRU_free_from_unzip_LRU_list(n_iterations);
+	freed = buf_LRU_free_from_unzip_LRU_list(buf_pool, n_iterations);

-	freed = buf_LRU_free_from_common_LRU_list(n_iterations);
+	freed = buf_LRU_free_from_common_LRU_list(
+		buf_pool, n_iterations);
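/* Illustrative sketch, not part of the patch: how n_iterations scales
the search depth described in the parameter comments above.  The list
lengths are assumptions for illustration. */
static unsigned long
buf_LRU_example_unzip_depth(
	unsigned long	n_iterations,
	unsigned long	unzip_lru_len)	/* length of buf_pool->unzip_LRU */
{
	/* n_iterations / 5 of the unzip_LRU list, or nothing once
	n_iterations >= 5 */
	return(n_iterations >= 5 ? 0 : n_iterations * unzip_lru_len / 5);
}

static unsigned long
buf_LRU_example_lru_depth(
	unsigned long	n_iterations,
	unsigned long	curr_size,	/* buf_pool->curr_size, in pages */
	unsigned long	lru_len)	/* length of buf_pool->LRU */
{
	/* n_iterations / 10 of the pool while n_iterations < 10;
	after that the whole list is scanned */
	return(n_iterations < 10
	       ? n_iterations * curr_size / 10
	       : lru_len);
}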
-buf_LRU_try_free_flushed_blocks(void)
-/*=================================*/
+buf_LRU_try_free_flushed_blocks(
+/*============================*/
+	buf_pool_t*	buf_pool)	/*!< in: buffer pool instance */

-	buf_pool_mutex_enter();
-
-	while (buf_pool->LRU_flush_ended > 0) {
-
-		buf_pool_mutex_exit();
-
-		buf_LRU_search_and_free_block(1);
-
-		buf_pool_mutex_enter();
-	}
-
-	buf_pool_mutex_exit();
+	if (buf_pool == NULL) {
+
+		for (i = 0; i < srv_buf_pool_instances; i++) {
+			buf_pool = buf_pool_from_array(i);
+			buf_LRU_try_free_flushed_blocks(buf_pool);
+		}
+	} else {
+		buf_pool_mutex_enter(buf_pool);
+
+		while (buf_pool->LRU_flush_ended > 0) {
+
+			buf_pool_mutex_exit(buf_pool);
+
+			buf_LRU_search_and_free_block(buf_pool, 1);
+
+			buf_pool_mutex_enter(buf_pool);
+		}
+
+		buf_pool_mutex_exit(buf_pool);
+	}
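/* Usage sketch, not part of the patch: a NULL argument now selects
the "iterate over srv_buf_pool_instances" branch above, so a caller
that used to address the single global pool can sweep every instance
with one call. */
static void
buf_LRU_example_sweep_all(void)
{
	buf_LRU_try_free_flushed_blocks(NULL);
}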
 /******************************************************************//**
-Returns TRUE if less than 25 % of the buffer pool is available. This can be
-used in heuristics to prevent huge transactions eating up the whole buffer
-pool for their locks.
+Returns TRUE if less than 25 % of the buffer pool in any instance is
+available. This can be used in heuristics to prevent huge transactions
+eating up the whole buffer pool for their locks.
 @return TRUE if less than 25 % of buffer pool left */
 buf_LRU_buf_pool_running_out(void)
 /*==============================*/

-	buf_pool_mutex_enter();
-
-	if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
-	    + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 4) {
+	for (i = 0; i < srv_buf_pool_instances && !ret; i++) {
+		buf_pool_t*	buf_pool;
+
+		buf_pool = buf_pool_from_array(i);
+
+		buf_pool_mutex_enter(buf_pool);
+
+		if (!recv_recovery_on
+		    && UT_LIST_GET_LEN(buf_pool->free)
+		    + UT_LIST_GET_LEN(buf_pool->LRU)
+		    < buf_pool->curr_size / 4) {

+		buf_pool_mutex_exit(buf_pool);

-	buf_pool_mutex_exit();
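/* Illustrative sketch, not part of the patch: the 25 % criterion
above, factored out per instance.  With multiple instances the
function now reports trouble as soon as ANY one instance runs short,
since a transaction's pages may be spread over all of them.  (The
real check is additionally gated on !recv_recovery_on.) */
static int
buf_LRU_example_running_out(
	unsigned long	free_len,	/* UT_LIST_GET_LEN(buf_pool->free) */
	unsigned long	lru_len,	/* UT_LIST_GET_LEN(buf_pool->LRU) */
	unsigned long	curr_size)	/* buf_pool->curr_size, in pages */
{
	return(free_len + lru_len < curr_size / 4);
}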
 @return a free control block, or NULL if the buf_pool->free list is empty */
-buf_LRU_get_free_only(void)
-/*=======================*/
+buf_LRU_get_free_only(
+/*==================*/
+	buf_pool_t*	buf_pool)

 	buf_block_t*	block;

-	ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own(buf_pool));

 	block = (buf_block_t*) UT_LIST_GET_FIRST(buf_pool->free);

 		ut_ad(block->page.in_free_list);
 		ut_d(block->page.in_free_list = FALSE);
 		ut_ad(!block->page.in_flush_list);

 		buf_block_set_state(block, BUF_BLOCK_READY_FOR_USE);
 		UNIV_MEM_ALLOC(block->frame, UNIV_PAGE_SIZE);
+		ut_ad(buf_pool_from_block(block) == buf_pool);

 		mutex_exit(&block->mutex);
 buf_LRU_get_free_block(
 /*===================*/
-	ulint		zip_size)	/*!< in: compressed page size in bytes,
-					or 0 if uncompressed tablespace */
+	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
+	ulint		zip_size)	/*!< in: compressed page size in bytes,
+					or 0 if uncompressed tablespace */

 	buf_block_t*	block		= NULL;
 	ibool		mon_value_was	= FALSE;
 	ibool		started_monitor	= FALSE;

-	buf_pool_mutex_enter();
+	buf_pool_mutex_enter(buf_pool);

 	if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
 	    + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 20) {

 	/* If there is a block in the free list, take it */
-	block = buf_LRU_get_free_only();
+	block = buf_LRU_get_free_only(buf_pool);

+		ut_ad(buf_pool_from_block(block) == buf_pool);
 #ifdef UNIV_DEBUG
 		block->page.zip.m_start =
 #endif /* UNIV_DEBUG */

 		if (UNIV_UNLIKELY(zip_size)) {
 			page_zip_set_size(&block->page.zip, zip_size);
-			block->page.zip.data = buf_buddy_alloc(zip_size, &lru);
+			block->page.zip.data = buf_buddy_alloc(
+				buf_pool, zip_size, &lru);
 			UNIV_MEM_DESC(block->page.zip.data, zip_size, block);
 			page_zip_set_size(&block->page.zip, 0);
 			block->page.zip.data = NULL;

-		buf_pool_mutex_exit();
+		buf_pool_mutex_exit(buf_pool);

 		if (started_monitor) {
 			srv_print_innodb_monitor = mon_value_was;

 	/* If no block was in the free list, search from the end of the LRU
 	list and try to free a block there */

-	buf_pool_mutex_exit();
+	buf_pool_mutex_exit(buf_pool);

-	freed = buf_LRU_search_and_free_block(n_iterations);
+	freed = buf_LRU_search_and_free_block(buf_pool, n_iterations);

 	/* No free block was found: try to flush the LRU list */

-	buf_flush_free_margin();
+	buf_flush_free_margin(buf_pool);
 	++srv_buf_pool_wait_free;

 	os_aio_simulated_wake_handler_threads();

-	buf_pool_mutex_enter();
+	buf_pool_mutex_enter(buf_pool);

 	if (buf_pool->LRU_flush_ended > 0) {
 		/* We have written pages in an LRU flush. To make the insert
 		buffer more efficient, we try to move these pages to the free

-		buf_pool_mutex_exit();
+		buf_pool_mutex_exit(buf_pool);

-		buf_LRU_try_free_flushed_blocks();
+		buf_LRU_try_free_flushed_blocks(buf_pool);

-	buf_pool_mutex_exit();
+	buf_pool_mutex_exit(buf_pool);

 	if (n_iterations > 10) {
 is inside the allowed limits. */
-buf_LRU_old_adjust_len(void)
-/*========================*/
+buf_LRU_old_adjust_len(
+/*===================*/
+	buf_pool_t*	buf_pool)	/*!< in: buffer pool instance */

 	ut_a(buf_pool->LRU_old);
-	ut_ad(buf_pool_mutex_own());
-	ut_ad(buf_LRU_old_ratio >= BUF_LRU_OLD_RATIO_MIN);
-	ut_ad(buf_LRU_old_ratio <= BUF_LRU_OLD_RATIO_MAX);
+	ut_ad(buf_pool_mutex_own(buf_pool));
+	ut_ad(buf_pool->LRU_old_ratio >= BUF_LRU_OLD_RATIO_MIN);
+	ut_ad(buf_pool->LRU_old_ratio <= BUF_LRU_OLD_RATIO_MAX);
 #if BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5)
 # error "BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5)"

 	old_len = buf_pool->LRU_old_len;
 	new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU)
-			 * buf_LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV,
+			 * buf_pool->LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV,
 			 UT_LIST_GET_LEN(buf_pool->LRU)
 			 - (BUF_LRU_OLD_TOLERANCE
 			    + BUF_LRU_NON_OLD_MIN_LEN));
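/* Worked example, not part of the patch, assuming
BUF_LRU_OLD_RATIO_DIV == 1024: with LRU_old_ratio == 378 (about 37 %)
and an LRU list of 1000 blocks, the first ut_min() operand is
1000 * 378 / 1024 = 369 blocks; buf_LRU_old_adjust_len() then shifts
the LRU_old pointer until LRU_old_len is within BUF_LRU_OLD_TOLERANCE
of that target.  The second operand merely caps the old sublist so
that at least BUF_LRU_NON_OLD_MIN_LEN blocks stay "new". */
static unsigned long
buf_LRU_example_old_target(
	unsigned long	lru_len,	/* UT_LIST_GET_LEN(buf_pool->LRU) */
	unsigned long	lru_old_ratio)	/* e.g. 378 for 37 % */
{
	return(lru_len * lru_old_ratio / 1024);
}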
 called when the LRU list grows to BUF_LRU_OLD_MIN_LEN length. */
-buf_LRU_old_init(void)
-/*==================*/
+buf_LRU_old_init(
+/*=============*/
+	buf_pool_t*	buf_pool)

 	buf_page_t*	bpage;

-	ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own(buf_pool));
 	ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN);

 	/* We first initialize all blocks in the LRU list as old and then use

 	buf_pool->LRU_old = UT_LIST_GET_FIRST(buf_pool->LRU);
 	buf_pool->LRU_old_len = UT_LIST_GET_LEN(buf_pool->LRU);

-	buf_LRU_old_adjust_len();
+	buf_LRU_old_adjust_len(buf_pool);
 /******************************************************************//**
 /*=================================*/
 	buf_page_t*	bpage)	/*!< in/out: control block */
+	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

 	ut_ad(buf_pool);
 	ut_ad(buf_page_in_file(bpage));
-	ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own(buf_pool));

 	if (buf_page_belongs_to_unzip_LRU(bpage)) {
 		buf_block_t*	block = (buf_block_t*) bpage;
 /*=================*/
 	buf_page_t*	bpage)	/*!< in: control block */
+	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

 	ut_ad(buf_pool);
-	ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own(buf_pool));

 	ut_a(buf_page_in_file(bpage));

 		/* Below: the previous block is guaranteed to exist,
 		because the LRU_old pointer is only allowed to differ
 		by BUF_LRU_OLD_TOLERANCE from strict
-		buf_LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV of the LRU
+		buf_pool->LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV of the LRU
 		list length. */
 		buf_page_t*	prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
 	ibool		old)	/*!< in: TRUE if should be put to the end
 				of the list, else put to the start */
+	buf_pool_t*	buf_pool = buf_pool_from_block(block);

 	ut_ad(buf_pool);
-	ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own(buf_pool));

 	ut_a(buf_page_belongs_to_unzip_LRU(&block->page));
 /*=========================*/
 	buf_page_t*	bpage)	/*!< in: control block */
+	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

 	ut_ad(buf_pool);
-	ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own(buf_pool));

 	ut_a(buf_page_in_file(bpage));

 		buf_page_set_old(bpage, TRUE);
 		buf_pool->LRU_old_len++;
-		buf_LRU_old_adjust_len();
+		buf_LRU_old_adjust_len(buf_pool);

 	} else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) {

 		/* The LRU list is now long enough for LRU_old to become
 		defined: init it */
-		buf_LRU_old_init();
+		buf_LRU_old_init(buf_pool);

 	buf_page_set_old(bpage, buf_pool->LRU_old != NULL);
 				LRU list is very short, the block is added to
 				the start, regardless of this parameter */
+	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

 	ut_ad(buf_pool);
-	ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own(buf_pool));

 	ut_a(buf_page_in_file(bpage));
 	ut_ad(!bpage->in_LRU_list);

 		/* Adjust the length of the old block list if necessary */

 		buf_page_set_old(bpage, old);
-		buf_LRU_old_adjust_len();
+		buf_LRU_old_adjust_len(buf_pool);

 	} else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) {

 		/* The LRU list is now long enough for LRU_old to become
 		defined: init it */
-		buf_LRU_old_init();
+		buf_LRU_old_init(buf_pool);

 	buf_page_set_old(bpage, buf_pool->LRU_old != NULL);
 /*=====================*/
 	buf_page_t*	bpage)	/*!< in: control block */
-	ut_ad(buf_pool_mutex_own());
+	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
+
+	ut_ad(buf_pool_mutex_own(buf_pool));

 	if (bpage->old) {
 		buf_pool->stat.n_pages_made_young++;
 				was temporarily released, or NULL */
 	buf_page_t*	b = NULL;
+	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
 	mutex_t*	block_mutex = buf_page_get_mutex(bpage);

-	ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own(buf_pool));
 	ut_ad(mutex_own(block_mutex));
 	ut_ad(buf_page_in_file(bpage));
 	ut_ad(bpage->in_LRU_list);
 		If it cannot be allocated (without freeing a block
 		from the LRU list), refuse to free bpage. */
-		buf_pool_mutex_exit_forbid();
-		b = buf_buddy_alloc(sizeof *b, NULL);
-		buf_pool_mutex_exit_allow();
+		buf_pool_mutex_exit_forbid(buf_pool);
+		b = buf_buddy_alloc(buf_pool, sizeof *b, NULL);
+		buf_pool_mutex_exit_allow(buf_pool);

 		if (UNIV_UNLIKELY(!b)) {
 			return(BUF_LRU_CANNOT_RELOCATE);
 	ut_a(bpage->buf_fix_count == 0);

 		buf_page_t*	prev_b	= UT_LIST_GET_PREV(LRU, b);
 		const ulint	fold	= buf_page_address_fold(
 			bpage->space, bpage->offset);

-		ut_a(!buf_page_hash_get(bpage->space, bpage->offset));
+		hash_b	= buf_page_hash_get_low(
+			buf_pool, bpage->space, bpage->offset, fold);

 		b->state = b->oldest_modification
 			? BUF_BLOCK_ZIP_DIRTY
 				ut_ad(buf_pool->LRU_old);

 				/* Adjust the length of the
 				old block list if necessary */
-				buf_LRU_old_adjust_len();
+				buf_LRU_old_adjust_len(buf_pool);
 			} else if (lru_len == BUF_LRU_OLD_MIN_LEN) {
 				/* The LRU list is now long
 				enough for LRU_old to become
 				defined: init it */
-				buf_LRU_old_init();
+				buf_LRU_old_init(buf_pool);
 #ifdef UNIV_LRU_DEBUG
 			/* Check that the "old" flag is consistent
 			: BUF_NO_CHECKSUM_MAGIC);

-		buf_pool_mutex_enter();
+		buf_pool_mutex_enter(buf_pool);
 		mutex_enter(block_mutex);

-			mutex_enter(&buf_pool_zip_mutex);
+			mutex_enter(&buf_pool->zip_mutex);
 			b->buf_fix_count--;
 			buf_page_set_io_fix(b, BUF_IO_NONE);
-			mutex_exit(&buf_pool_zip_mutex);
+			mutex_exit(&buf_pool->zip_mutex);

 		buf_LRU_block_free_hashed_page((buf_block_t*) bpage);

 		/* The block_mutex should have been released by
 		buf_LRU_block_remove_hashed_page() when it returns
 		BUF_BLOCK_ZIP_FREE. */
-		ut_ad(block_mutex == &buf_pool_zip_mutex);
+		ut_ad(block_mutex == &buf_pool->zip_mutex);
 		mutex_enter(block_mutex);
 /*=============================*/
 	buf_block_t*	block)	/*!< in: block, must not contain a file page */
+	buf_pool_t*	buf_pool = buf_pool_from_block(block);

-	ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own(buf_pool));
 	ut_ad(mutex_own(&block->mutex));

 	switch (buf_block_get_state(block)) {
 		block->page.zip.data = NULL;
 		mutex_exit(&block->mutex);
-		buf_pool_mutex_exit_forbid();
-		buf_buddy_free(data, page_zip_get_size(&block->page.zip));
-		buf_pool_mutex_exit_allow();
+		buf_pool_mutex_exit_forbid(buf_pool);
+
+		buf_buddy_free(
+			buf_pool, data, page_zip_get_size(&block->page.zip));
+
+		buf_pool_mutex_exit_allow(buf_pool);
 		mutex_enter(&block->mutex);
 		page_zip_set_size(&block->page.zip, 0);
 /******************************************************************//**
 Takes a block out of the LRU list and page hash table.
 If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
-the object will be freed and buf_pool_zip_mutex will be released.
+the object will be freed and buf_pool->zip_mutex will be released.
 If a compressed page or a compressed-only block descriptor is freed,
 other compressed pages or compressed-only block descriptors may be
 	ibool		zip)	/*!< in: TRUE if should remove also the
 				compressed page of an uncompressed page */

 	const buf_page_t*	hashed_bpage;
+	buf_pool_t*		buf_pool = buf_pool_from_bpage(bpage);

-	ut_ad(buf_pool_mutex_own());
+	ut_ad(buf_pool_mutex_own(buf_pool));
 	ut_ad(mutex_own(buf_page_get_mutex(bpage)));

 	ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);

-	hashed_bpage = buf_page_hash_get(bpage->space, bpage->offset);
+	fold = buf_page_address_fold(bpage->space, bpage->offset);
+	hashed_bpage = buf_page_hash_get_low(
+		buf_pool, bpage->space, bpage->offset, fold);

 	if (UNIV_UNLIKELY(bpage != hashed_bpage)) {
 		fprintf(stderr,
 	ut_ad(!bpage->in_zip_hash);
 	ut_ad(bpage->in_page_hash);
 	ut_d(bpage->in_page_hash = FALSE);
-	HASH_DELETE(buf_page_t, hash, buf_pool->page_hash,
-		    buf_page_address_fold(bpage->space, bpage->offset),
-		    bpage);
+	HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, bpage);
 	switch (buf_page_get_state(bpage)) {
 	case BUF_BLOCK_ZIP_PAGE:
 		ut_ad(!bpage->in_free_list);

 		UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);

-		mutex_exit(&buf_pool_zip_mutex);
-		buf_pool_mutex_exit_forbid();
-		buf_buddy_free(bpage->zip.data,
-			       page_zip_get_size(&bpage->zip));
-		buf_buddy_free(bpage, sizeof(*bpage));
-		buf_pool_mutex_exit_allow();
+		mutex_exit(&buf_pool->zip_mutex);
+		buf_pool_mutex_exit_forbid(buf_pool);
+
+		buf_buddy_free(
+			buf_pool, bpage->zip.data,
+			page_zip_get_size(&bpage->zip));
+
+		buf_buddy_free(buf_pool, bpage, sizeof(*bpage));
+		buf_pool_mutex_exit_allow(buf_pool);

 		UNIV_MEM_UNDESC(bpage);
 		return(BUF_BLOCK_ZIP_FREE);
 		ut_ad(!bpage->in_flush_list);
 		ut_ad(!bpage->in_LRU_list);
 		mutex_exit(&((buf_block_t*) bpage)->mutex);
-		buf_pool_mutex_exit_forbid();
-		buf_buddy_free(data, page_zip_get_size(&bpage->zip));
-		buf_pool_mutex_exit_allow();
+		buf_pool_mutex_exit_forbid(buf_pool);
+
+		buf_buddy_free(
+			buf_pool, data,
+			page_zip_get_size(&bpage->zip));
+
+		buf_pool_mutex_exit_allow(buf_pool);
 		mutex_enter(&((buf_block_t*) bpage)->mutex);
 		page_zip_set_size(&bpage->zip, 0);
 	buf_block_t*	block)	/*!< in: block, must contain a file page and
 				be in a state where it can be freed */
-	ut_ad(buf_pool_mutex_own());
+	buf_pool_t*	buf_pool = buf_pool_from_block(block);
+
+	ut_ad(buf_pool_mutex_own(buf_pool));
 	ut_ad(mutex_own(&block->mutex));

 	buf_block_set_state(block, BUF_BLOCK_MEMORY);
 /**********************************************************************//**
-Updates buf_LRU_old_ratio.
+Updates buf_pool->LRU_old_ratio for one buffer pool instance.
 @return updated old_pct */
-buf_LRU_old_ratio_update(
-/*=====================*/
-	uint	old_pct,/*!< in: Reserve this percentage of
-			the buffer pool for "old" blocks. */
-	ibool	adjust)	/*!< in: TRUE=adjust the LRU list;
-			FALSE=just assign buf_LRU_old_ratio
-			during the initialization of InnoDB */
+buf_LRU_old_ratio_update_instance(
+/*==============================*/
+	buf_pool_t*	buf_pool,/*!< in: buffer pool instance */
+	uint		old_pct,/*!< in: Reserve this percentage of
+				the buffer pool for "old" blocks. */
+	ibool		adjust)	/*!< in: TRUE=adjust the LRU list;
+				FALSE=just assign buf_pool->LRU_old_ratio
+				during the initialization of InnoDB */

-		buf_pool_mutex_enter();
+		buf_pool_mutex_enter(buf_pool);

-		if (ratio != buf_LRU_old_ratio) {
-			buf_LRU_old_ratio = ratio;
+		if (ratio != buf_pool->LRU_old_ratio) {
+			buf_pool->LRU_old_ratio = ratio;

 			if (UT_LIST_GET_LEN(buf_pool->LRU)
 			    >= BUF_LRU_OLD_MIN_LEN) {
-				buf_LRU_old_adjust_len();
+				buf_LRU_old_adjust_len(buf_pool);

-		buf_pool_mutex_exit();
+		buf_pool_mutex_exit(buf_pool);

-		buf_LRU_old_ratio = ratio;
+		buf_pool->LRU_old_ratio = ratio;

 	/* the reverse of
 	ratio = old_pct * BUF_LRU_OLD_RATIO_DIV / 100 */
 	return((uint) (ratio * 100 / (double) BUF_LRU_OLD_RATIO_DIV + 0.5));
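/* Worked example, not part of the patch, assuming
BUF_LRU_OLD_RATIO_DIV == 1024: old_pct = 37 gives
ratio = 37 * 1024 / 100 = 378; converting back,
378 * 100 / 1024.0 + 0.5 = 37.4, truncated to 37.  The + 0.5 is what
keeps the round trip stable despite the integer division. */
static unsigned int
buf_LRU_example_pct_to_ratio(unsigned int old_pct)
{
	return(old_pct * 1024 / 100);			/* 37 -> 378 */
}

static unsigned int
buf_LRU_example_ratio_to_pct(unsigned int ratio)
{
	return((unsigned int) (ratio * 100 / 1024.0 + 0.5));	/* 378 -> 37 */
}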
+/**********************************************************************//**
+Updates buf_pool->LRU_old_ratio.
+@return updated old_pct */
+buf_LRU_old_ratio_update(
+/*=====================*/
+	uint	old_pct,/*!< in: Reserve this percentage of
+			the buffer pool for "old" blocks. */
+	ibool	adjust)	/*!< in: TRUE=adjust the LRU list;
+			FALSE=just assign buf_pool->LRU_old_ratio
+			during the initialization of InnoDB */

+	ulint	new_ratio = 0;

+	for (i = 0; i < srv_buf_pool_instances; i++) {
+		buf_pool_t*	buf_pool;
+
+		buf_pool = buf_pool_from_array(i);
+
+		new_ratio = buf_LRU_old_ratio_update_instance(
+			buf_pool, old_pct, adjust);
 /********************************************************************//**
 Update the historical stats that we are collecting for LRU eviction
 policy at the end of each interval. */
 buf_LRU_stat_update(void)
 /*=====================*/

 	buf_LRU_stat_t*	item;
+	buf_pool_t*	buf_pool;
+	ibool		evict_started = FALSE;

 	/* If we haven't started eviction yet then don't update stats. */
-	if (buf_pool->freed_page_clock == 0) {
+	for (i = 0; i < srv_buf_pool_instances; i++) {
+
+		buf_pool = buf_pool_from_array(i);
+
+		if (buf_pool->freed_page_clock != 0) {
+			evict_started = TRUE;
+			break;
+		}
+	}
+
+	if (!evict_started) {
 		goto func_exit;

-	buf_pool_mutex_enter();

 	/* Update the index. */
 	item = &buf_LRU_stat_arr[buf_LRU_stat_arr_ind];
 	buf_LRU_stat_arr_ind++;

 	/* Put current entry in the array. */
 	memcpy(item, &buf_LRU_stat_cur, sizeof *item);

-	buf_pool_mutex_exit();

 	/* Clear the current entry. */
 	memset(&buf_LRU_stat_cur, 0, sizeof buf_LRU_stat_cur);
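/* Illustrative sketch, not part of the patch: the invariant that
buf_LRU_stat_update() maintains.  buf_LRU_stat_sum always holds the
sum of the BUF_LRU_STAT_N_INTERVAL sampled entries, so before the
oldest slot is overwritten its contribution is retired; that
subtraction step is not visible in this hunk and is shown here as an
assumption. */
static void
buf_LRU_example_stat_roll(
	unsigned long*	arr,		/* circular sample buffer */
	unsigned long*	ind,		/* in/out: round-robin cursor */
	unsigned long*	sum,		/* in/out: running sum over arr[] */
	unsigned long	cur,		/* current interval's counter */
	unsigned long	n_interval)	/* BUF_LRU_STAT_N_INTERVAL */
{
	unsigned long*	item = &arr[*ind];

	*ind = (*ind + 1) % n_interval;

	*sum -= *item;	/* retire the oldest sample */
	*sum += cur;	/* account the newest one */
	*item = cur;	/* store it; the caller then clears cur */
}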
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 /**********************************************************************//**
-Validates the LRU list.
-buf_LRU_validate(void)
-/*==================*/
+Validates the LRU list for one buffer pool instance. */
+buf_LRU_validate_instance(
+/*======================*/
+	buf_pool_t*	buf_pool)

 	buf_page_t*	bpage;
 	buf_block_t*	block;

 	ut_ad(buf_pool);
-	buf_pool_mutex_enter();
+	buf_pool_mutex_enter(buf_pool);

 	if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) {

 		ut_a(buf_pool->LRU_old);
 		old_len = buf_pool->LRU_old_len;
 		new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU)
-				 * buf_LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV,
+				 * buf_pool->LRU_old_ratio
+				 / BUF_LRU_OLD_RATIO_DIV,
 				 UT_LIST_GET_LEN(buf_pool->LRU)
 				 - (BUF_LRU_OLD_TOLERANCE
 				    + BUF_LRU_NON_OLD_MIN_LEN));

 		ut_a(buf_page_belongs_to_unzip_LRU(&block->page));

-	buf_pool_mutex_exit();
+	buf_pool_mutex_exit(buf_pool);
+/**********************************************************************//**
+Validates the LRU list.
+buf_LRU_validate(void)
+/*==================*/

+	for (i = 0; i < srv_buf_pool_instances; i++) {
+		buf_pool_t*	buf_pool;
+
+		buf_pool = buf_pool_from_array(i);
+		buf_LRU_validate_instance(buf_pool);
 #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */

 #if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 /**********************************************************************//**
-Prints the LRU list. */
+Prints the LRU list for one buffer pool instance. */
+buf_LRU_print_instance(
+/*===================*/
+	buf_pool_t*	buf_pool)

 	const buf_page_t*	bpage;

 	ut_ad(buf_pool);
-	buf_pool_mutex_enter();
+	buf_pool_mutex_enter(buf_pool);

 	bpage = UT_LIST_GET_FIRST(buf_pool->LRU);

 	while (bpage != NULL) {
+		mutex_enter(buf_page_get_mutex(bpage));
 		fprintf(stderr, "BLOCK space %lu page %lu ",
 			(ulong) buf_page_get_space(bpage),
 			(ulong) buf_page_get_page_no(bpage));

+		mutex_exit(buf_page_get_mutex(bpage));
 		bpage = UT_LIST_GET_NEXT(LRU, bpage);

-	buf_pool_mutex_exit();
+	buf_pool_mutex_exit(buf_pool);

+/**********************************************************************//**
+Prints the LRU list. */

+	buf_pool_t*	buf_pool;

+	for (i = 0; i < srv_buf_pool_instances; i++) {
+		buf_pool = buf_pool_from_array(i);
+		buf_LRU_print_instance(buf_pool);

 #endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */