/*****************************************************************************

Copyright (C) 1995, 2010, Innobase Oy. All Rights Reserved.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
St, Fifth Floor, Boston, MA 02110-1301 USA

*****************************************************************************/

/**************************************************//**
@file include/buf0flu.h
The database buffer pool flush algorithm

Created 11/5/1995 Heikki Tuuri
*******************************************************/

#ifndef buf0flu_h
#define buf0flu_h

#include "univ.i"
#include "ut0byte.h"
#ifndef UNIV_HOTBACKUP
#include "mtr0types.h"
#include "buf0types.h"

/********************************************************************//**
Remove a block from the flush list of modified blocks. */
UNIV_INTERN
void
buf_flush_remove(
/*=============*/
	buf_page_t*	bpage);	/*!< in: pointer to the block in question */
/*******************************************************************//**
Relocates a buffer control block on the flush_list.
Note that it is assumed that the contents of bpage have already been
copied to dpage. */
UNIV_INTERN
void
buf_flush_relocate_on_flush_list(
/*=============================*/
	buf_page_t*	bpage,	/*!< in/out: control block being moved */
	buf_page_t*	dpage);	/*!< in/out: destination block */
/********************************************************************//**
Updates the flush system data structures when a write is completed. */
UNIV_INTERN
void
buf_flush_write_complete(
/*=====================*/
	buf_page_t*	bpage);	/*!< in: pointer to the block in question */
/*********************************************************************//**
Flushes pages from the end of the LRU list if there is too small
a margin of replaceable pages there. If buf_pool is NULL, the free
margin is flushed on all buffer pool instances. */
UNIV_INTERN
void
buf_flush_free_margin(
/*==================*/
	buf_pool_t*	buf_pool);	/*!< in: buffer pool instance,
					or NULL for all instances */
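/* Illustrative only (not part of the declarations in this file): a caller
that finds too few replaceable pages can refill the margin for a single
instance or, by passing NULL, for all of them:

	buf_flush_free_margin(buf_pool);	one instance
	buf_flush_free_margin(NULL);		every instance

The number of blocks it tries to make available is described by the
BUF_FLUSH_FREE_BLOCK_MARGIN and BUF_FLUSH_EXTRA_MARGIN macros near the end
of this file. */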
/*********************************************************************//**
Flushes pages from the end of all the LRU lists. */
UNIV_INTERN
void
buf_flush_free_margins(void);
/*=========================*/
#endif /* !UNIV_HOTBACKUP */
/********************************************************************//**
Initializes a page for writing to the tablespace. */
UNIV_INTERN
void
buf_flush_init_for_writing(
/*=======================*/
	byte*		page,		/*!< in/out: page */
	void*		page_zip_,	/*!< in/out: compressed page, or NULL */
	ib_uint64_t	newest_lsn);	/*!< in: newest modification lsn
					to the page */
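/* Sketch of a call (illustrative only; the real call is made from the flush
code in buf0flu.c before a dirty page is handed to the I/O layer). For an
uncompressed page the frame is stamped with its newest modification LSN:

	buf_flush_init_for_writing(block->frame,
				   NULL,
				   block->page.newest_modification);

Here block is a buf_block_t*; for a compressed page the second argument
would point to the compressed page instead of being NULL. */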
#ifndef UNIV_HOTBACKUP
/*******************************************************************//**
This utility flushes dirty blocks from the end of the LRU list.
NOTE: The calling thread may own latches to pages: to avoid deadlocks,
this function must be written so that it cannot end up waiting for these
latches!
@return number of blocks for which the write request was queued;
ULINT_UNDEFINED if there was a flush of the same type already running */
UNIV_INTERN
ulint
buf_flush_LRU(
/*==========*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	ulint		min_n);		/*!< in: wished minimum number of blocks
					flushed (it is not guaranteed that the
					actual number is that big, though) */
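/* Illustrative only: a thread that wants to free space at the tail of the
LRU list of one instance might request, say,

	ulint	n = buf_flush_LRU(buf_pool, 100);

and must be prepared for n == ULINT_UNDEFINED, which means an LRU flush
batch was already running on this instance and nothing was queued by this
call. */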
/*******************************************************************//**
This utility flushes dirty blocks from the end of the flush_list of
all buffer pool instances.
NOTE: The calling thread is not allowed to own any latches on pages!
@return number of blocks for which the write request was queued;
ULINT_UNDEFINED if there was a flush of the same type already running */
UNIV_INTERN
ulint
buf_flush_list(
/*===========*/
	ulint		min_n,		/*!< in: wished minimum number of blocks
					flushed (it is not guaranteed that the
					actual number is that big, though) */
	ib_uint64_t	lsn_limit);	/*!< in: in the case of BUF_FLUSH_LIST,
					all blocks whose oldest_modification
					is smaller than this should be flushed
					(if their number does not exceed
					min_n), otherwise ignored */
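/* Illustrative only: to get every block modified before some LSN written
out (for example, before a checkpoint can be advanced), a caller can ask
for an unbounded flush up to that LSN:

	ulint	n_flushed = buf_flush_list(ULINT_MAX, lsn_limit);

	if (n_flushed == ULINT_UNDEFINED) {
		... a flush list batch was already running; callers
		typically wait and retry ...
	}

To block until the batches have actually completed, the caller then uses
buf_flush_wait_batch_end() (declared below) on each buffer pool instance
with type BUF_FLUSH_LIST. */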
/******************************************************************//**
Waits until a flush batch of the given type ends. */
UNIV_INTERN
void
buf_flush_wait_batch_end(
/*=====================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	enum buf_flush	type);		/*!< in: BUF_FLUSH_LRU
					or BUF_FLUSH_LIST */
/********************************************************************//**
This function should be called at a mini-transaction commit, if a page was
modified in it. Puts the block to the list of modified blocks, if it is not
already in it. */
UNIV_INLINE
void
buf_flush_note_modification(
/*========================*/
	buf_block_t*	block,	/*!< in: block which is modified */
	mtr_t*		mtr);	/*!< in: mtr */
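/* Sketch of the intended call site (simplified; the real call is made from
the mtr commit code): when a mini-transaction that dirtied a block commits,
it notifies the flush system for each such block,

	buf_flush_note_modification(block, mtr);

which places the block on the flush_list, ordered by its oldest
modification LSN, unless it is already there. */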
/********************************************************************//**
This function should be called when recovery has modified a buffer page. */
UNIV_INLINE
void
buf_flush_recv_note_modification(
/*=============================*/
	buf_block_t*	block,		/*!< in: block which is modified */
	ib_uint64_t	start_lsn,	/*!< in: start lsn of the first mtr in a
					set of mtr's */
	ib_uint64_t	end_lsn);	/*!< in: end lsn of the last mtr in the
					set of mtr's */
/********************************************************************//**
Returns TRUE if the file page block is immediately suitable for replacement,
i.e., the transition FILE_PAGE => NOT_USED is allowed.
@return TRUE if can replace immediately */
UNIV_INTERN
ibool
buf_flush_ready_for_replace(
/*========================*/
	buf_page_t*	bpage);	/*!< in: buffer control block, must be
				buf_page_in_file(bpage) and in the LRU list */
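/* Illustrative only: the LRU eviction path walks the tail of the LRU list
and may reuse a frame right away when this predicate holds:

	if (buf_flush_ready_for_replace(bpage)) {
		... the frame can be freed and moved to the free list ...
	}

Typically a block is not ready while it is still dirty, buffer-fixed, or
has I/O pending on it. */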

/** @brief Statistics for selecting flush rate based on redo log
generation speed.

These statistics are generated for heuristics used in estimating the
rate at which we should flush the dirty blocks to avoid bursty IO
activity. Note that the rate of flushing not only depends on how many
dirty pages we have in the buffer pool but it is also a function of
how much redo the workload is generating and at what rate. */

struct buf_flush_stat_struct
{
	ib_uint64_t	redo;		/**< amount of redo generated. */
	ulint		n_flushed;	/**< number of pages flushed. */
};

/** Statistics for selecting flush rate of dirty pages. */
typedef struct buf_flush_stat_struct buf_flush_stat_t;
/*********************************************************************
179
Update the historical stats that we are collecting for flush rate
180
heuristics at the end of each interval. */
183
buf_flush_stat_update(void);
184
/*=======================*/
/*********************************************************************
Determines the fraction of dirty pages that need to be flushed based
on the speed at which we generate redo log. Note that if redo log
is generated at a significant rate without a corresponding increase
in the number of dirty pages (for example, an in-memory workload)
it can cause IO bursts of flushing. This function implements heuristics
to avoid this burstiness.
@return number of dirty pages to be flushed / second */
UNIV_INTERN
ulint
buf_flush_get_desired_flush_rate(void);
/*==================================*/
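/* Rough illustration of the idea only (the actual computation is in
buf0flu.c and uses the buf_flush_stat_t history above): if the workload
generates redo fast enough to consume the reusable log space in roughly T
seconds and there are n_dirty modified pages in the buffer pool, then
flushing on the order of n_dirty / T pages per second keeps page cleaning
in step with log growth instead of deferring the work to a burst when a
checkpoint becomes urgent. */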

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/******************************************************************//**
Validates the flush list.
@return TRUE if ok */
UNIV_INTERN
ibool
buf_flush_validate(
/*===============*/
	buf_pool_t*	buf_pool);	/*!< in: buffer pool instance */
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */

/********************************************************************//**
Initialize the red-black tree to speed up insertions into the flush_list
during the recovery process. Should be called at the start of the recovery
process before any page has been read or written. */
UNIV_INTERN
void
buf_flush_init_flush_rbt(void);
/*==========================*/

/********************************************************************//**
Frees up the red-black tree. */
UNIV_INTERN
void
buf_flush_free_flush_rbt(void);
/*==========================*/

/** When buf_flush_free_margin is called, it tries to make this many blocks
available to replacement in the free list and at the end of the LRU list (to
make sure that a read-ahead batch can be read efficiently in a single
sweep). */
#define BUF_FLUSH_FREE_BLOCK_MARGIN(b)	(5 + BUF_READ_AHEAD_AREA(b))
/** Extra margin to apply above BUF_FLUSH_FREE_BLOCK_MARGIN */
#define BUF_FLUSH_EXTRA_MARGIN(b)	((BUF_FLUSH_FREE_BLOCK_MARGIN(b) / 4 \
					+ 100) / srv_buf_pool_instances)
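/* Worked example (assuming the common case of a 64-page read-ahead area
and a single buffer pool instance): BUF_FLUSH_FREE_BLOCK_MARGIN(b)
evaluates to 5 + 64 = 69 blocks, and BUF_FLUSH_EXTRA_MARGIN(b) to
(69 / 4 + 100) / 1 = 117 blocks, using integer division throughout. */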
#endif /* !UNIV_HOTBACKUP */

#ifndef UNIV_NONINL
#include "buf0flu.ic"
#endif

#endif /* buf0flu_h */