	buf_page_t*	bpage);	/*!< in: pointer to the block in question */
/*******************************************************************//**
Relocates a buffer control block on the flush_list.
Note that it is assumed that the contents of bpage have already been
copied to dpage. */
	buf_page_t*	bpage);	/*!< in: pointer to the block in question */
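/* Editor's usage sketch, not part of the original header: the relocate
routine documented above expects the caller to copy the control block first.
Assuming that routine is buf_flush_relocate_on_flush_list(bpage, dpage), as
the bpage/dpage wording suggests, a caller would roughly do

	memcpy(dpage, bpage, sizeof(buf_page_t));
	buf_flush_relocate_on_flush_list(bpage, dpage);

where the function name and the memcpy step are assumptions made purely for
illustration. */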
/*********************************************************************//**
Flushes pages from the end of the LRU list if there is too small
a margin of replaceable pages there. If buf_pool is NULL, the free
margin is flushed on all buffer pool instances. */
buf_flush_free_margin(
/*==================*/
	buf_pool_t*	buf_pool);	/*!< in: buffer pool instance */
/*********************************************************************//**
Flushes pages from the end of all the LRU lists. */
buf_flush_free_margins(void);
/*=========================*/
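/* Editor's usage sketch, not part of the original header. With multiple
buffer pool instances the free margin can be flushed per instance or for all
instances at once; as documented above, passing NULL means all instances:

	buf_flush_free_margin(buf_pool);	(one instance)
	buf_flush_free_margin(NULL);		(every instance)
	buf_flush_free_margins();		(every instance, convenience form)

buf_pool is assumed to be a valid buf_pool_t pointer obtained elsewhere. */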
#endif /* !UNIV_HOTBACKUP */
/********************************************************************//**
Initializes a page for writing to the tablespace. */
#ifndef UNIV_HOTBACKUP
/*******************************************************************//**
This utility flushes dirty blocks from the end of the LRU list.
NOTE: The calling thread may own latches to pages: to avoid deadlocks,
this function must be written so that it cannot end up waiting for these
latches!
@return number of blocks for which the write request was queued;
ULINT_UNDEFINED if there was a flush of the same type already running */
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	ulint		min_n);		/*!< in: wished minimum number of blocks
					flushed (it is not guaranteed that the
					actual number is that big, though) */
/*******************************************************************//**
This utility flushes dirty blocks from the end of the flush_list of
all buffer pool instances.
NOTE: The calling thread is not allowed to own any latches on pages!
@return number of blocks for which the write request was queued;
ULINT_UNDEFINED if there was a flush of the same type already running */
	ulint		min_n,		/*!< in: wished minimum number of blocks
					flushed (it is not guaranteed that the
					actual number is that big, though) */
buf_flush_wait_batch_end(
/*=====================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	enum buf_flush	type);		/*!< in: BUF_FLUSH_LRU
					or BUF_FLUSH_LIST */
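/* Editor's usage sketch, not part of the original header. A typical flushing
sequence queues write requests for up to min_n blocks and then waits for the
batch to finish; ULINT_UNDEFINED means a batch of the same type was already
running. Assuming the flush-list entry point described above is named
buf_flush_list() and takes min_n plus an lsn limit, a caller might do

	ulint	n = buf_flush_list(100, IB_ULONGLONG_MAX);

	if (n != ULINT_UNDEFINED) {
		buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST);
	}

The function name, the lsn-limit argument and the use of a NULL buffer pool
pointer to mean "all instances" in buf_flush_wait_batch_end() are assumptions
for illustration. */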
/********************************************************************//**
This function should be called at a mini-transaction commit, if a page was
modified in it. Puts the block to the list of modified blocks, if it is not
already in it. */
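/* Editor's usage sketch, not part of the original header. The comment above
says the call belongs at mini-transaction commit time, after a page has been
modified. Assuming the function is buf_flush_note_modification() and takes
the modified block and the committing mini-transaction, the call site would
look roughly like

	buf_flush_note_modification(block, mtr);

with block a buf_block_t* that the mtr dirtied; the name and argument order
are assumptions for illustration. */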
struct buf_flush_stat_struct
{
	ib_uint64_t	redo;		/**< amount of redo generated. */
	ulint		n_flushed;	/**< number of pages flushed. */
};

/** Statistics for selecting flush rate of dirty pages. */
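/* Editor's sketch, not part of the original header, of how these counters
support flush-rate selection: sample the struct periodically and turn the
deltas into rates. The typedef name buf_flush_stat_t and the fixed sampling
interval are assumptions for illustration; the real heuristics live in
buf0flu.c.

	buf_flush_stat_t	prev;	(snapshot taken one interval ago)
	buf_flush_stat_t	cur;	(counters sampled now)
	ib_uint64_t		redo_rate;
	ulint			flush_rate;

	redo_rate  = cur.redo - prev.redo;		(redo bytes per interval)
	flush_rate = cur.n_flushed - prev.n_flushed;	(pages flushed per interval)
*/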
@return TRUE if ok */
buf_flush_validate(
/*===============*/
	buf_pool_t*	buf_pool);	/*!< in: buffer pool instance */
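/* Editor's debug-build sketch, not part of the original header. Because
validation now takes a single buffer pool instance, a complete check loops
over every instance; buf_pool_from_array() and srv_buf_pool_instances are
assumed accessors for the instance array:

	ulint	i;

	for (i = 0; i < srv_buf_pool_instances; i++) {
		ut_a(buf_flush_validate(buf_pool_from_array(i)));
	}
*/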
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
/********************************************************************//**
Initialize the red-black tree to speed up insertions into the flush_list
during the recovery process. Should be called at the start of recovery,
before any page has been read or written. */
buf_flush_init_flush_rbt(void);
/*==========================*/
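/* Editor's usage sketch, not part of the original header. The red-black tree
only exists to keep flush_list insertions ordered while recovery inserts
pages out of oldest-modification order, so it is created before recovery
starts and discarded when recovery ends. Assuming the matching teardown
function below is buf_flush_free_flush_rbt():

	buf_flush_init_flush_rbt();	(before any page is read or written)
	(recovery applies the redo log here)
	buf_flush_free_flush_rbt();	(once recovery has completed)
*/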
/********************************************************************//**
Frees up the red-black tree. */
available for replacement in the free list and at the end of the LRU list (to
make sure that a read-ahead batch can be read efficiently in a single
sweep). */
#define BUF_FLUSH_FREE_BLOCK_MARGIN(b)	(5 + BUF_READ_AHEAD_AREA(b))
/** Extra margin to apply above BUF_FLUSH_FREE_BLOCK_MARGIN */
#define BUF_FLUSH_EXTRA_MARGIN(b)	(BUF_FLUSH_FREE_BLOCK_MARGIN(b) / 4 \
					 + 100)
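/* Editor's worked example, not part of the original header. If
BUF_READ_AHEAD_AREA(b) evaluates to 64 pages for a buffer pool instance b
(64 is only an assumed value for the arithmetic), then

	BUF_FLUSH_FREE_BLOCK_MARGIN(b) = 5 + 64        = 69 blocks
	BUF_FLUSH_EXTRA_MARGIN(b)      = 69 / 4 + 100  = 117 blocks

so the free-margin flush tries to keep roughly 69 replaceable blocks per
instance and starts flushing when fewer than 69 + 117 of them are available. */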
#endif /* !UNIV_HOTBACKUP */
#ifndef UNIV_NONINL