1
1
/*****************************************************************************

Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved.
Copyright (c) 2008, Google Inc.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

*******************************************************/
33
33
#include "mtr0mtr.h"
34
#ifndef UNIV_HOTBACKUP
34
35
#include "buf0flu.h"
35
36
#include "buf0lru.h"
36
37
#include "buf0rea.h"
39
/*********************************************************************//**
40
Gets the current size of buffer buf_pool in bytes.
41
@return size in bytes */
44
buf_pool_get_curr_size(void)
45
/*========================*/
47
return(srv_buf_pool_curr_size);
50
/*********************************************************************//**
51
Gets the current size of buffer buf_pool in pages.
52
@return size in pages*/
55
buf_pool_get_n_pages(void)
56
/*======================*/
58
return(buf_pool_get_curr_size() / UNIV_PAGE_SIZE);
61
39
/********************************************************************//**
62
40
Reads the freed_page_clock of a buffer block.
94
72
/*=====================*/
95
73
const buf_page_t* bpage) /*!< in: block to make younger */
97
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
99
if (UNIV_UNLIKELY(buf_pool->freed_page_clock == 0)) {
100
/* If eviction has not started yet, do not update the
101
statistics or move blocks in the LRU list. This is
102
either the warm-up phase or an in-memory workload. */
104
} else if (buf_LRU_old_threshold_ms && bpage->old) {
105
unsigned access_time = buf_page_is_accessed(bpage);
108
&& ((ib_uint32_t) (ut_time_ms() - access_time))
109
>= buf_LRU_old_threshold_ms) {
113
buf_pool->stat.n_pages_not_made_young++;
75
return(buf_pool->freed_page_clock
76
>= buf_page_get_freed_page_clock(bpage)
77
+ 1 + (buf_pool->curr_size / 4));
80
/*********************************************************************//**
81
Gets the current size of buffer buf_pool in bytes.
82
@return size in bytes */
85
buf_pool_get_curr_size(void)
86
/*========================*/
88
return(buf_pool->curr_size * UNIV_PAGE_SIZE);
91
/********************************************************************//**
92
Gets the smallest oldest_modification lsn for any page in the pool. Returns
93
zero if all modified pages have been flushed to disk.
94
@return oldest modification in pool, zero if none */
97
buf_pool_get_oldest_modification(void)
98
/*==================================*/
103
buf_pool_mutex_enter();
105
bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
116
/* FIXME: bpage->freed_page_clock is 31 bits */
117
return((buf_pool->freed_page_clock & ((1UL << 31) - 1))
118
> ((ulint) bpage->freed_page_clock
119
+ (buf_pool->curr_size
120
* (BUF_LRU_OLD_RATIO_DIV - buf_pool->LRU_old_ratio)
121
/ (BUF_LRU_OLD_RATIO_DIV * 4))));
110
ut_ad(bpage->in_flush_list);
111
lsn = bpage->oldest_modification;
114
buf_pool_mutex_exit();
116
/* The returned answer may be out of date: the flush_list can
117
change after the mutex has been released. */
122
/*******************************************************************//**
123
Increments the buf_pool clock by one and returns its new value. Remember
124
that in the 32 bit version the clock wraps around at 4 billion!
125
@return new clock value */
128
buf_pool_clock_tic(void)
129
/*====================*/
131
ut_ad(buf_pool_mutex_own());
133
buf_pool->ulint_clock++;
135
return(buf_pool->ulint_clock);
137
#endif /* !UNIV_HOTBACKUP */
125
139
/*********************************************************************//**
126
140
Gets the state of a block.
268
282
/*********************************************************************//**
283
Determine the approximate LRU list position of a block.
284
@return LRU list position */
287
buf_page_get_LRU_position(
288
/*======================*/
289
const buf_page_t* bpage) /*!< in: control block */
291
ut_ad(buf_page_in_file(bpage));
292
ut_ad(buf_pool_mutex_own());
294
return(bpage->LRU_position);
297
/*********************************************************************//**
269
298
Gets the mutex of a block.
270
299
@return pointer to mutex protecting bpage */
274
303
/*===============*/
275
304
const buf_page_t* bpage) /*!< in: pointer to control block */
277
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
279
306
switch (buf_page_get_state(bpage)) {
280
307
case BUF_BLOCK_ZIP_FREE:
283
310
case BUF_BLOCK_ZIP_PAGE:
284
311
case BUF_BLOCK_ZIP_DIRTY:
285
return(&buf_pool->zip_mutex);
312
return(&buf_pool_zip_mutex);
287
314
return(&((buf_block_t*) bpage)->mutex);
383
410
buf_page_t* bpage, /*!< in/out: control block */
384
411
enum buf_io_fix io_fix) /*!< in: io_fix state */
387
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
388
ut_ad(buf_pool_mutex_own(buf_pool));
413
ut_ad(buf_pool_mutex_own());
390
414
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
392
416
bpage->io_fix = io_fix;
414
438
/*==================*/
415
439
const buf_page_t* bpage) /*!< control block being relocated */
418
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
419
ut_ad(buf_pool_mutex_own(buf_pool));
441
ut_ad(buf_pool_mutex_own());
421
442
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
422
443
ut_ad(buf_page_in_file(bpage));
423
444
ut_ad(bpage->in_LRU_list);
453
471
buf_page_t* bpage, /*!< in/out: control block */
454
472
ibool old) /*!< in: old */
457
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
458
#endif /* UNIV_DEBUG */
459
474
ut_a(buf_page_in_file(bpage));
460
ut_ad(buf_pool_mutex_own(buf_pool));
475
ut_ad(buf_pool_mutex_own());
461
476
ut_ad(bpage->in_LRU_list);
463
478
#ifdef UNIV_LRU_DEBUG
464
ut_a((buf_pool->LRU_old_len == 0) == (buf_pool->LRU_old == NULL));
465
/* If a block is flagged "old", the LRU_old list must exist. */
466
ut_a(!old || buf_pool->LRU_old);
468
if (UT_LIST_GET_PREV(LRU, bpage) && UT_LIST_GET_NEXT(LRU, bpage)) {
469
const buf_page_t* prev = UT_LIST_GET_PREV(LRU, bpage);
470
const buf_page_t* next = UT_LIST_GET_NEXT(LRU, bpage);
471
if (prev->old == next->old) {
472
ut_a(prev->old == old);
475
ut_a(buf_pool->LRU_old == (old ? bpage : next));
479
if (UT_LIST_GET_PREV(LRU, bpage) && UT_LIST_GET_NEXT(LRU, bpage)
480
&& UT_LIST_GET_PREV(LRU, bpage)->old
481
== UT_LIST_GET_NEXT(LRU, bpage)->old) {
482
ut_a(UT_LIST_GET_PREV(LRU, bpage)->old == old);
478
484
#endif /* UNIV_LRU_DEBUG */
483
489
/*********************************************************************//**
484
Determine the time of first access of a block in the buffer pool.
485
@return ut_time_ms() at the time of first access, 0 if not accessed */
490
Determine if a block has been accessed in the buffer pool.
491
@return TRUE if accessed */
488
494
buf_page_is_accessed(
489
495
/*=================*/
490
496
const buf_page_t* bpage) /*!< in: control block */
492
498
ut_ad(buf_page_in_file(bpage));
494
return(bpage->access_time);
500
return(bpage->accessed);
497
503
/*********************************************************************//**
501
507
buf_page_set_accessed(
502
508
/*==================*/
503
509
buf_page_t* bpage, /*!< in/out: control block */
504
ulint time_ms) /*!< in: ut_time_ms() */
510
ibool accessed) /*!< in: accessed */
507
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
508
ut_ad(buf_pool_mutex_own(buf_pool));
510
512
ut_a(buf_page_in_file(bpage));
513
ut_ad(mutex_own(buf_page_get_mutex(bpage)));
512
if (!bpage->access_time) {
513
/* Make this the time of the first access. */
514
bpage->access_time = time_ms;
515
bpage->accessed = accessed;
518
518
/*********************************************************************//**
703
703
/*========================*/
704
704
const buf_block_t* block) /*!< in: block */
707
ut_ad(buf_page_in_file(&block->page));
708
#ifdef UNIV_SYNC_DEBUG
709
ut_ad(rw_lock_own(&(((buf_block_t*) block)->lock), RW_LOCK_EXCLUSIVE)
710
|| rw_lock_own(&(((buf_block_t*) block)->lock), RW_LOCK_SHARED));
711
#endif /* UNIV_SYNC_DEBUG */
712
706
return(block->lock_hash_val);
715
709
/********************************************************************//**
710
Allocates a buffer block.
711
@return own: the allocated block, in state BUF_BLOCK_MEMORY */
716
ulint zip_size) /*!< in: compressed page size in bytes,
717
or 0 if uncompressed tablespace */
721
block = buf_LRU_get_free_block(zip_size);
723
buf_block_set_state(block, BUF_BLOCK_MEMORY);
728
/********************************************************************//**
716
729
Frees a buffer block which does not contain a file page. */
806
817
buf_block_t* block) /*!< in: block */
808
819
#ifdef UNIV_SYNC_DEBUG
809
buf_pool_t* buf_pool = buf_pool_from_bpage((buf_page_t*)block);
811
ut_ad((buf_pool_mutex_own(buf_pool)
820
ut_ad((buf_pool_mutex_own()
812
821
&& (block->page.buf_fix_count == 0))
813
822
|| rw_lock_own(&(block->lock), RW_LOCK_EXCLUSIVE));
814
823
#endif /* UNIV_SYNC_DEBUG */
889
898
/******************************************************************//**
890
Returns the buffer pool instance given a page instance
896
const buf_page_t* bpage) /*!< in: buffer pool page */
898
/* Every page must be in some buffer pool. */
899
ut_ad(bpage->buf_pool != NULL);
901
return(bpage->buf_pool);
904
/******************************************************************//**
905
Returns the buffer pool instance given a block instance
911
const buf_block_t* block) /*!< in: block */
913
return(buf_pool_from_bpage(&block->page));
916
/******************************************************************//**
917
Returns the buffer pool instance given space and offset of page
918
@return buffer pool */
923
ulint space, /*!< in: space id */
924
ulint offset) /*!< in: offset of the page within space */
928
ulint ignored_offset;
930
ignored_offset = offset >> 6; /* 2log of BUF_READ_AHEAD_AREA (64)*/
931
fold = buf_page_address_fold(space, ignored_offset);
932
index = fold % srv_buf_pool_instances;
933
return buf_pool_ptr[index];
936
/******************************************************************//**
937
Returns the buffer pool instance given its array index
938
@return buffer pool */
943
ulint index) /*!< in: array index to get
944
buffer pool instance from */
946
return buf_pool_ptr[index];
949
/******************************************************************//**
950
899
Returns the control block of a file page, NULL if not found.
951
900
@return block, NULL if not found */
954
buf_page_hash_get_low(
955
/*==================*/
956
buf_pool_t* buf_pool, /*!< buffer pool instance */
957
ulint space, /*!< in: space id */
958
ulint offset, /*!< in: offset of the page
960
ulint fold) /*!< in: buf_page_address_fold(
905
ulint space, /*!< in: space id */
906
ulint offset) /*!< in: offset of the page within space */
963
908
buf_page_t* bpage;
966
ut_ad(buf_pool_mutex_own(buf_pool));
967
ut_ad(fold == buf_page_address_fold(space, offset));
912
ut_ad(buf_pool_mutex_own());
969
914
/* Look for the page in the hash table */
916
fold = buf_page_address_fold(space, offset);
971
918
HASH_SEARCH(hash, buf_pool->page_hash, fold, buf_page_t*, bpage,
972
919
ut_ad(bpage->in_page_hash && !bpage->in_zip_hash
973
920
&& buf_page_in_file(bpage)),
976
923
ut_a(buf_page_in_file(bpage));
977
924
ut_ad(bpage->in_page_hash);
978
925
ut_ad(!bpage->in_zip_hash);
979
#if UNIV_WORD_SIZE == 4
980
/* On 32-bit systems, there is no padding in
981
buf_page_t. On other systems, Valgrind could complain
982
about uninitialized pad bytes. */
983
926
UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage);
990
/******************************************************************//**
991
Returns the control block of a file page, NULL if not found.
992
@return block, NULL if not found or not a real control block */
997
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
998
ulint space, /*!< in: space id */
999
ulint offset) /*!< in: offset of the page
1003
ulint fold = buf_page_address_fold(space, offset);
1005
bpage = buf_page_hash_get_low(buf_pool, space, offset, fold);
1007
if (bpage && buf_pool_watch_is_sentinel(buf_pool, bpage)) {
1020
938
buf_block_hash_get(
1021
939
/*===============*/
1022
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
1023
ulint space, /*!< in: space id */
1024
ulint offset) /*!< in: offset of the page
940
ulint space, /*!< in: space id */
941
ulint offset) /*!< in: offset of the page within space */
1029
block = buf_page_get_block(buf_page_hash_get(buf_pool, space, offset));
943
return(buf_page_get_block(buf_page_hash_get(space, offset)));
1034
946
/********************************************************************//**
1046
958
ulint offset) /*!< in: page number */
1048
960
const buf_page_t* bpage;
1049
buf_pool_t* buf_pool = buf_pool_get(space, offset);
1051
buf_pool_mutex_enter(buf_pool);
1053
bpage = buf_page_hash_get(buf_pool, space, offset);
1055
buf_pool_mutex_exit(buf_pool);
962
buf_pool_mutex_enter();
964
bpage = buf_page_hash_get(space, offset);
966
buf_pool_mutex_exit();
1057
968
return(bpage != NULL);
1074
984
switch (buf_page_get_state(bpage)) {
1075
985
case BUF_BLOCK_ZIP_PAGE:
1076
986
case BUF_BLOCK_ZIP_DIRTY:
1077
mutex_enter(&buf_pool->zip_mutex);
987
mutex_enter(&buf_pool_zip_mutex);
1078
988
bpage->buf_fix_count--;
1079
mutex_exit(&buf_pool->zip_mutex);
989
mutex_exit(&buf_pool_zip_mutex);
1081
991
case BUF_BLOCK_FILE_PAGE:
1082
992
block = (buf_block_t*) bpage;
1107
1016
buf_page_release(
1108
1017
/*=============*/
1109
1018
buf_block_t* block, /*!< in: buffer block */
1110
ulint rw_latch) /*!< in: RW_S_LATCH, RW_X_LATCH,
1019
ulint rw_latch, /*!< in: RW_S_LATCH, RW_X_LATCH,
1021
mtr_t* mtr) /*!< in: mtr */
1115
1025
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
1116
1026
ut_a(block->page.buf_fix_count > 0);
1028
if (rw_latch == RW_X_LATCH && mtr->modifications) {
1029
buf_pool_mutex_enter();
1030
buf_flush_note_modification(block, mtr);
1031
buf_pool_mutex_exit();
1118
1034
mutex_enter(&block->mutex);
1120
1036
#ifdef UNIV_SYNC_DEBUG
/*********************************************************************//**
Adds latch level info for the rw-lock protecting the buffer frame. This
should be called in the debug version after a successful latching of a
page if we know the latching order level of the acquired latch. */
UNIV_INLINE
void
buf_block_dbg_add_level(
/*====================*/
	buf_block_t*	block,	/*!< in: buffer page
				where we have acquired latch */
	ulint		level)	/*!< in: latching order level */
{
	sync_thread_add_level(&block->lock, level);
}
#endif /* UNIV_SYNC_DEBUG */
1150
/********************************************************************//**
1151
Acquire mutex on all buffer pool instances. */
1154
buf_pool_mutex_enter_all(void)
1155
/*==========================*/
1159
for (i = 0; i < srv_buf_pool_instances; i++) {
1160
buf_pool_t* buf_pool;
1162
buf_pool = buf_pool_from_array(i);
1163
buf_pool_mutex_enter(buf_pool);
1167
/********************************************************************//**
1168
Release mutex on all buffer pool instances. */
1171
buf_pool_mutex_exit_all(void)
1172
/*=========================*/
1176
for (i = 0; i < srv_buf_pool_instances; i++) {
1177
buf_pool_t* buf_pool;
1179
buf_pool = buf_pool_from_array(i);
1180
buf_pool_mutex_exit(buf_pool);
1183
1066
#endif /* !UNIV_HOTBACKUP */