@@ -330,6 +330,6 @@
         buf_flush_list_mutex_enter(buf_pool);
 
-        /* The field in_LRU_list is protected by buf_pool_mutex, which
+        /* The field in_LRU_list is protected by buf_pool->mutex, which
         we are not holding. However, while a block is in the flush
         list, it is dirty and cannot be discarded, not from the
         page_hash or from the LRU list. At most, the uncompressed
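
The comment in this hunk leans on an invariant worth spelling out: eviction requires oldest_modification == 0, and a page sits on buf_pool->flush_list exactly while oldest_modification != 0, so a flush-list walk that holds only buf_pool->flush_list_mutex can never see its pages discarded under it. A minimal standalone C sketch of that interplay follows; page_t, try_evict() and the two mutexes are simplified stand-ins for illustration, not InnoDB symbols.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct page_t {
        uint64_t        oldest_modification;    /* 0 means clean */
        struct page_t*  next_in_flush_list;
} page_t;

static pthread_mutex_t  pool_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t  flush_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static page_t*          flush_list = NULL;

/* Eviction path: runs under pool_mutex only, and must refuse dirty
pages.  That refusal is what keeps the flush-list walk below safe even
though the walk does not hold pool_mutex. */
static bool try_evict(page_t* page)
{
        pthread_mutex_lock(&pool_mutex);
        bool evictable = (page->oldest_modification == 0);
        pthread_mutex_unlock(&pool_mutex);
        return evictable;       /* caller would unlink from the LRU here */
}

/* Flush-list walk: holds only flush_list_mutex, mirroring the hunk
above.  Every page reached is dirty, so try_evict() cannot have
discarded it concurrently. */
static void walk_flush_list(void)
{
        pthread_mutex_lock(&flush_list_mutex);
        for (page_t* p = flush_list; p; p = p->next_in_flush_list) {
                printf("dirty page, oldest_modification=%llu\n",
                       (unsigned long long) p->oldest_modification);
        }
        pthread_mutex_unlock(&flush_list_mutex);
}

int main(void)
{
        page_t dirty = { 42, NULL };
        flush_list = &dirty;
        walk_flush_list();
        printf("evictable: %d\n", try_evict(&dirty));   /* 0: still dirty */
        return 0;
}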
@@ -1155,3 +1155,80 @@
+# if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
+/********************************************************************//**
+Writes a flushable page asynchronously from the buffer pool to a file.
+NOTE: buf_pool->mutex and block->mutex must be held upon entering this
+function, and they will be released by this function after flushing.
+This is loosely based on buf_flush_batch() and buf_flush_page().
+@return TRUE if the page was flushed and the mutexes released */
+UNIV_INTERN
+ibool
+buf_flush_page_try(
+/*===============*/
+        buf_pool_t*     buf_pool,       /*!< in/out: buffer pool instance */
+        buf_block_t*    block)          /*!< in/out: buffer control block */
+{
+        ut_ad(buf_pool_mutex_own(buf_pool));
+        ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
+        ut_ad(mutex_own(&block->mutex));
+
+        if (!buf_flush_ready_for_flush(&block->page, BUF_FLUSH_LRU)) {
+                return(FALSE);
+        }
+
+        if (buf_pool->n_flush[BUF_FLUSH_LRU] > 0
+            || buf_pool->init_flush[BUF_FLUSH_LRU]) {
+                /* There is already a flush batch of the same type running */
+                return(FALSE);
+        }
+
+        buf_pool->init_flush[BUF_FLUSH_LRU] = TRUE;
+
+        buf_page_set_io_fix(&block->page, BUF_IO_WRITE);
+
+        buf_page_set_flush_type(&block->page, BUF_FLUSH_LRU);
+
+        if (buf_pool->n_flush[BUF_FLUSH_LRU]++ == 0) {
+
+                os_event_reset(buf_pool->no_flush[BUF_FLUSH_LRU]);
+        }
+
+        /* VERY IMPORTANT:
+        Because any thread may call the LRU flush, even when owning
+        locks on pages, to avoid deadlocks, we must make sure that the
+        s-lock is acquired on the page without waiting: this is
+        accomplished because buf_flush_ready_for_flush() must hold,
+        and that requires the page not to be bufferfixed. */
+
+        rw_lock_s_lock_gen(&block->lock, BUF_IO_WRITE);
+
+        /* Note that the s-latch is acquired before releasing the
+        buf_pool mutex: this ensures that the latch is acquired
+        immediately. */
+
+        mutex_exit(&block->mutex);
+        buf_pool_mutex_exit(buf_pool);
+
+        /* Even though block is not protected by any mutex at this
+        point, it is safe to access block, because it is io_fixed and
+        oldest_modification != 0. Thus, it cannot be relocated in the
+        buffer pool or removed from flush_list or LRU_list. */
+
+        buf_flush_write_block_low(&block->page);
+
+        buf_pool_mutex_enter(buf_pool);
+        buf_pool->init_flush[BUF_FLUSH_LRU] = FALSE;
+
+        if (buf_pool->n_flush[BUF_FLUSH_LRU] == 0) {
+                /* The running flush batch has ended */
+                os_event_set(buf_pool->no_flush[BUF_FLUSH_LRU]);
+        }
+
+        buf_pool_mutex_exit(buf_pool);
+        buf_flush_buffered_writes();
+
+        return(TRUE);
+}
+# endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
+
 /********************************************************************//**
 Writes a flushable page asynchronously from the buffer pool to a file.
 NOTE: in simulated aio we must call
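
The added buf_flush_page_try() has an asymmetric locking contract: it is entered with buf_pool->mutex and block->mutex held, and on the TRUE path it releases both itself after latching the page, while on the FALSE path the caller keeps ownership. The standalone C sketch below models that contract with POSIX primitives; it substitutes pthread_rwlock_tryrdlock() for the non-waiting s-lock acquisition that the original gets from the buf_flush_ready_for_flush() precondition. block_t, flush_one_page_try() and write_page() are hypothetical names, not InnoDB symbols.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
        pthread_rwlock_t lock;          /* page frame latch */
        bool             ready_for_flush;
} block_t;

static pthread_mutex_t pool_mutex  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t block_mutex = PTHREAD_MUTEX_INITIALIZER;

static void write_page(block_t* block)
{
        (void) block;
        puts("page written");
}

/* Pre:  pool_mutex and block_mutex held by the caller.
   Post: returns true  => page flushed, both mutexes released here;
         returns false => nothing done, caller still owns both. */
static bool flush_one_page_try(block_t* block)
{
        if (!block->ready_for_flush) {
                return false;
        }

        /* Must not wait: another thread could hold page latches while
        waiting for our mutexes.  A try-lock makes that explicit. */
        if (pthread_rwlock_tryrdlock(&block->lock) != 0) {
                return false;
        }

        /* Latch taken before the mutexes are dropped, so nothing can
        slip in between, mirroring the s-latch comment above. */
        pthread_mutex_unlock(&block_mutex);
        pthread_mutex_unlock(&pool_mutex);

        write_page(block);
        pthread_rwlock_unlock(&block->lock);
        return true;
}

int main(void)
{
        block_t block = { PTHREAD_RWLOCK_INITIALIZER, true };

        pthread_mutex_lock(&pool_mutex);
        pthread_mutex_lock(&block_mutex);

        if (!flush_one_page_try(&block)) {
                /* failure path: the contract says we still own both */
                pthread_mutex_unlock(&block_mutex);
                pthread_mutex_unlock(&pool_mutex);
        }
        return 0;
}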
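A second detail worth isolating is the batch bookkeeping: the first page that joins an LRU batch resets buf_pool->no_flush[BUF_FLUSH_LRU], the thread that retires the last in-flight write sets it, and threads needing a quiescent pool wait on that event. A rough standalone equivalent, assuming a plain counter and condition variable in place of os_event:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t  pool_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t   no_flush   = PTHREAD_COND_INITIALIZER;
static int              n_flush    = 0;

static void flush_begin(void)
{
        pthread_mutex_lock(&pool_mutex);
        n_flush++;      /* os_event_reset() is implicit: n_flush > 0 */
        pthread_mutex_unlock(&pool_mutex);
}

static void flush_end(void)
{
        pthread_mutex_lock(&pool_mutex);
        if (--n_flush == 0) {
                /* the running flush batch has ended */
                pthread_cond_broadcast(&no_flush);
        }
        pthread_mutex_unlock(&pool_mutex);
}

static void wait_for_no_flush(void)
{
        pthread_mutex_lock(&pool_mutex);
        while (n_flush > 0) {
                pthread_cond_wait(&no_flush, &pool_mutex);
        }
        pthread_mutex_unlock(&pool_mutex);
}

int main(void)
{
        flush_begin();
        flush_end();
        wait_for_no_flush();
        puts("no flush batch running");
        return 0;
}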
@@ -2133,10 +2210,10 @@
                 ut_ad(bpage->in_flush_list);
 
-                /* A page in flush_list can be in BUF_BLOCK_REMOVE_HASH
-                state. This happens when a page is in the middle of
-                being relocated. In that case the original descriptor
-                can have this state and still be in the flush list
-                waiting to acquire the flush_list_mutex to complete
-                the relocation. */
+                /* A page in buf_pool->flush_list can be in
+                BUF_BLOCK_REMOVE_HASH state. This happens when a page
+                is in the middle of being relocated. In that case the
+                original descriptor can have this state and still be
+                in the flush list waiting to acquire the
+                buf_pool->flush_list_mutex to complete the relocation. */
                 ut_a(buf_page_in_file(bpage)
                      || buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH);
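
The rewritten comment justifies the relaxed assertion that follows it: flush-list validation must accept a descriptor in BUF_BLOCK_REMOVE_HASH state, since a relocation can be parked waiting for buf_pool->flush_list_mutex while the original descriptor is still on the list. A toy standalone model of that check, with simplified state names in place of the real buf_page_state values:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef enum {
        PAGE_FILE_PAGE,         /* normal page, cf. BUF_BLOCK_FILE_PAGE */
        PAGE_ZIP_DIRTY,         /* compressed dirty page */
        PAGE_REMOVE_HASH,       /* mid-relocation, cf. BUF_BLOCK_REMOVE_HASH */
        PAGE_NOT_USED           /* must never appear on the flush list */
} page_state_t;

static bool page_in_file(page_state_t state)
{
        return state == PAGE_FILE_PAGE || state == PAGE_ZIP_DIRTY;
}

static void validate_flush_list_entry(page_state_t state)
{
        /* Mirrors: ut_a(buf_page_in_file(bpage)
                    || buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH); */
        assert(page_in_file(state) || state == PAGE_REMOVE_HASH);
}

int main(void)
{
        validate_flush_list_entry(PAGE_FILE_PAGE);
        validate_flush_list_entry(PAGE_REMOVE_HASH);    /* tolerated */
        puts("flush list entries valid");
        return 0;
}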