buf_flush_insert_into_flush_list(
/*=============================*/
	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
	buf_block_t*	block,		/*!< in/out: block which is modified */
	ib_uint64_t	lsn)		/*!< in: oldest modification */
{
	ut_ad(!buf_pool_mutex_own(buf_pool));
	ut_ad(log_flush_order_mutex_own());
	ut_ad(mutex_own(&block->mutex));

	buf_flush_list_mutex_enter(buf_pool);

	ut_ad((UT_LIST_GET_FIRST(buf_pool->flush_list) == NULL)
	      || (UT_LIST_GET_FIRST(buf_pool->flush_list)->oldest_modification
		  <= lsn));

	/* If we are in the recovery then we need to update the flush
	red-black tree as well. */
	if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
		buf_flush_list_mutex_exit(buf_pool);
		buf_flush_insert_sorted_into_flush_list(buf_pool, block, lsn);
		return;
	}

	ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
	ut_ad(!block->page.in_flush_list);

	ut_d(block->page.in_flush_list = TRUE);
	block->page.oldest_modification = lsn;
	UT_LIST_ADD_FIRST(list, buf_pool->flush_list, &block->page);

#ifdef UNIV_DEBUG_VALGRIND
	{
		ulint	zip_size = buf_block_get_zip_size(block);

		if (UNIV_UNLIKELY(zip_size)) {
			UNIV_MEM_ASSERT_RW(block->page.zip.data, zip_size);
		} else {
			UNIV_MEM_ASSERT_RW(block->frame, UNIV_PAGE_SIZE);
		}
	}
#endif /* UNIV_DEBUG_VALGRIND */
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
	ut_a(buf_flush_validate_low(buf_pool));
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */

	buf_flush_list_mutex_exit(buf_pool);
}
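
/* NOTE (editorial illustration, not part of the original source): callers
invoke this function while holding log_flush_order_mutex, so blocks arrive
here in non-decreasing order of their oldest_modification LSN.  Adding each
block at the head with UT_LIST_ADD_FIRST() therefore keeps
buf_pool->flush_list ordered by oldest_modification, newest first, which is
exactly the invariant asserted at the top of the function. */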
/********************************************************************//**
buf_flush_insert_sorted_into_flush_list(
/*====================================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	buf_block_t*	block,		/*!< in/out: block which is modified */
	ib_uint64_t	lsn)		/*!< in: oldest modification */
{
	buf_page_t*	prev_b;

	ut_ad(!buf_pool_mutex_own(buf_pool));
	ut_ad(log_flush_order_mutex_own());
	ut_ad(mutex_own(&block->mutex));
	ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);

	buf_flush_list_mutex_enter(buf_pool);

	/* The field in_LRU_list is protected by buf_pool->mutex, which
	we are not holding. However, while a block is in the flush
	list, it is dirty and cannot be discarded, not from the
	page_hash or from the LRU list. At most, the uncompressed
	page frame of a compressed block may be discarded or created
	(copying the block->page to or from a buf_page_t that is
	dynamically allocated from buf_buddy_alloc()). Because those
	transitions hold block->mutex and the flush list mutex (via
	buf_flush_relocate_on_flush_list()), there is no possibility
	of a race condition in the assertions below. */
	ut_ad(block->page.in_LRU_list);
	ut_ad(block->page.in_page_hash);
	/* buf_buddy_block_register() will take a block in the
	BUF_BLOCK_MEMORY state, not a file page. */
	ut_ad(!block->page.in_zip_hash);

	ut_ad(!block->page.in_flush_list);
	ut_d(block->page.in_flush_list = TRUE);
	block->page.oldest_modification = lsn;

#ifdef UNIV_DEBUG_VALGRIND
	{
		ulint	zip_size = buf_block_get_zip_size(block);

		if (UNIV_UNLIKELY(zip_size)) {
			UNIV_MEM_ASSERT_RW(block->page.zip.data, zip_size);
		} else {
			UNIV_MEM_ASSERT_RW(block->frame, UNIV_PAGE_SIZE);
		}
	}
#endif /* UNIV_DEBUG_VALGRIND */
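
/* NOTE (editorial illustration, not part of the original source): this
sorted variant is reached only through the buf_pool->flush_rbt branch above,
i.e. during crash recovery.  There, modifications are re-applied from the
redo log and do not necessarily arrive in increasing LSN order, so the block
cannot simply be added at the list head; it has to be inserted at its sorted
position, which the red-black tree keeps track of. */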
# if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
/********************************************************************//**
Writes a flushable page asynchronously from the buffer pool to a file.
NOTE: buf_pool->mutex and block->mutex must be held upon entering this
function, and they will be released by this function after flushing.
This is loosely based on buf_flush_batch() and buf_flush_page().
@return TRUE if the page was flushed and the mutexes released */
	buf_pool_t*	buf_pool,	/*!< in/out: buffer pool instance */
	buf_block_t*	block)		/*!< in/out: buffer control block */
{
	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
	ut_ad(mutex_own(&block->mutex));

	if (!buf_flush_ready_for_flush(&block->page, BUF_FLUSH_LRU)) {
		return(FALSE);
	}

	if (buf_pool->n_flush[BUF_FLUSH_LRU] > 0
	    || buf_pool->init_flush[BUF_FLUSH_LRU]) {
		/* There is already a flush batch of the same type running */
		return(FALSE);
	}

	buf_pool->init_flush[BUF_FLUSH_LRU] = TRUE;

	buf_page_set_io_fix(&block->page, BUF_IO_WRITE);

	buf_page_set_flush_type(&block->page, BUF_FLUSH_LRU);

	if (buf_pool->n_flush[BUF_FLUSH_LRU]++ == 0) {

		os_event_reset(buf_pool->no_flush[BUF_FLUSH_LRU]);
	}

	/* Because any thread may call the LRU flush, even when owning
	locks on pages, to avoid deadlocks, we must make sure that the
	s-lock is acquired on the page without waiting: this is
	accomplished because buf_flush_ready_for_flush() must hold,
	and that requires the page not to be bufferfixed. */

	rw_lock_s_lock_gen(&block->lock, BUF_IO_WRITE);

	/* Note that the s-latch is acquired before releasing the
	buf_pool mutex: this ensures that the latch is acquired
	immediately. */

	mutex_exit(&block->mutex);
	buf_pool_mutex_exit(buf_pool);

	/* Even though block is not protected by any mutex at this
	point, it is safe to access block, because it is io_fixed and
	oldest_modification != 0. Thus, it cannot be relocated in the
	buffer pool or removed from flush_list or LRU_list. */

	buf_flush_write_block_low(&block->page);

	buf_pool_mutex_enter(buf_pool);
	buf_pool->init_flush[BUF_FLUSH_LRU] = FALSE;

	if (buf_pool->n_flush[BUF_FLUSH_LRU] == 0) {
		/* The running flush batch has ended */
		os_event_set(buf_pool->no_flush[BUF_FLUSH_LRU]);
	}

	buf_pool_mutex_exit(buf_pool);
	buf_flush_buffered_writes();

	return(TRUE);
}
# endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
/********************************************************************//**
Writes a flushable page asynchronously from the buffer pool to a file.
NOTE: in simulated aio we must call
os_aio_simulated_wake_handler_threads after we have posted a batch of
writes! NOTE: buf_pool->mutex and buf_page_get_mutex(bpage) must be
held upon entering this function, and they will be released by this
function. */
buf_flush_page(
/*===========*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	buf_page_t*	bpage,		/*!< in: buffer control block */
	enum buf_flush	flush_type)	/*!< in: BUF_FLUSH_LRU
					or BUF_FLUSH_LIST */
/*====================*/
	ulint		space,		/*!< in: space id */
	ulint		offset,		/*!< in: page offset */
	enum buf_flush	flush_type,	/*!< in: BUF_FLUSH_LRU or
					BUF_FLUSH_LIST */
	ulint		n_flushed,	/*!< in: number of pages
					flushed so far in this batch */
	ulint		n_to_flush)	/*!< in: maximum number of pages
					we are allowed to flush */
{
	ulint		low;
	ulint		high;
	buf_pool_t*	buf_pool = buf_pool_get(space, offset);

	ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);

	if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN) {
		/* If there is little space, it is better not to flush
		any block except from the end of the LRU list */

		low = offset;
		high = offset + 1;
	} else {
		/* When flushed, dirty blocks are searched in
		neighborhoods of this size, and flushed along with the
		original page. */

		ulint	buf_flush_area;

		buf_flush_area	= ut_min(
			BUF_READ_AHEAD_AREA(buf_pool),
			buf_pool->curr_size / 16);

		low = (offset / buf_flush_area) * buf_flush_area;
		high = (offset / buf_flush_area + 1) * buf_flush_area;
	}
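
	/* NOTE (editorial illustration, numbers assumed, not part of the
	original source): with the usual read-ahead area of 64 pages and a
	buffer pool instance of 8192 pages, buf_flush_area
	= ut_min(64, 8192 / 16) = 64, so a page at offset 1000 gets the
	neighborhood low = (1000 / 64) * 64 = 960 and high = 1024. */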
		if (buf_flush_ready_for_flush(bpage, flush_type)
		    && (i == offset || !bpage->buf_fix_count)) {
			/* We only try to flush those
			neighbors != offset where the buf fix
			count is zero, as we then know that we
			probably can latch the page without a
			semaphore wait. Semaphore waits are
			expensive because we must flush the
			doublewrite buffer before we start
			waiting. */

			buf_flush_page(buf_pool, bpage, flush_type);
			ut_ad(!mutex_own(block_mutex));
			ut_ad(!buf_pool_mutex_own(buf_pool));
			buf_pool_mutex_enter(buf_pool);
		} else {
			mutex_exit(block_mutex);
		}

	buf_pool_mutex_exit(buf_pool);
/********************************************************************//**
Check if the block is modified and ready for flushing. If the block
is ready to flush then flush the page and try to flush its neighbors.
@return TRUE if buf_pool mutex was not released during this function.
This does not guarantee that some pages were written as well.
The number of pages written is added to *count. */
buf_flush_page_and_try_neighbors(
/*=============================*/
	buf_page_t*	bpage,		/*!< in: buffer control block,
					must be
					buf_page_in_file(bpage) */
	enum buf_flush	flush_type,	/*!< in: BUF_FLUSH_LRU
					or BUF_FLUSH_LIST */
	ulint		n_to_flush,	/*!< in: number of pages to
					flush */
	ulint*		count)		/*!< in/out: number of pages
					flushed */
{
	mutex_t*	block_mutex;
	ibool		flushed = FALSE;
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
#endif /* UNIV_DEBUG */

	ut_ad(buf_pool_mutex_own(buf_pool));

	block_mutex = buf_page_get_mutex(bpage);
	mutex_enter(block_mutex);

	ut_a(buf_page_in_file(bpage));

	if (buf_flush_ready_for_flush(bpage, flush_type)) {
		ulint		space;
		ulint		offset;
		buf_pool_t*	buf_pool;

		buf_pool = buf_pool_from_bpage(bpage);

		buf_pool_mutex_exit(buf_pool);

		/* These fields are protected by both the
		buffer pool mutex and block mutex. */
		space = buf_page_get_space(bpage);
		offset = buf_page_get_page_no(bpage);

		mutex_exit(block_mutex);

		/* Try to flush also all the neighbors */
		*count += buf_flush_try_neighbors(space,
						  offset,
						  flush_type,
						  *count,
						  n_to_flush);

		buf_pool_mutex_enter(buf_pool);
		flushed = TRUE;
	} else {
		mutex_exit(block_mutex);
	}

	ut_ad(buf_pool_mutex_own(buf_pool));

	return(flushed);
}
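
/* NOTE (editorial illustration, not part of the original source): the batch
loops below depend on this return value.  buf_flush_LRU_list_batch() and
buf_flush_flush_list_batch() keep walking backwards through their list only
while this function returns FALSE, i.e. while buf_pool->mutex was never
released; once a page (and possibly its neighbors) has been flushed, the
list may have changed, so the scan restarts from the end of the list. */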
/*******************************************************************//**
This utility flushes dirty blocks from the end of the LRU list.
In the case of an LRU flush the calling thread may own latches to
pages: to avoid deadlocks, this function must be written so that it
cannot end up waiting for these latches!
@return number of blocks for which the write request was queued. */
buf_flush_LRU_list_batch(
/*=====================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	ulint		max)		/*!< in: max of blocks to flush */
{
	buf_page_t*	bpage;
	ulint		count = 0;

	ut_ad(buf_pool_mutex_own(buf_pool));

	do {
		/* Start from the end of the list looking for a
		suitable block to be flushed. */
		bpage = UT_LIST_GET_LAST(buf_pool->LRU);

		/* Iterate backwards over the LRU list till we find
		a page that isn't ready for flushing. */
		while (bpage != NULL
		       && !buf_flush_page_and_try_neighbors(
				bpage, BUF_FLUSH_LRU, max, &count)) {

			bpage = UT_LIST_GET_PREV(LRU, bpage);
		}
	} while (bpage != NULL && count < max);

	/* We keep track of all flushes happening as part of LRU
	flush. When estimating the desired rate at which flush_list
	should be flushed, we factor in this value. */
	buf_lru_flush_page_count += count;

	ut_ad(buf_pool_mutex_own(buf_pool));

	return(count);
}
/*******************************************************************//**
This utility flushes dirty blocks from the end of the flush_list.
The calling thread is not allowed to own any latches on pages!
@return number of blocks for which the write request was queued;
ULINT_UNDEFINED if there was a flush of the same type already
running */
buf_flush_flush_list_batch(
/*=======================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	ulint		min_n,		/*!< in: wished minimum number
					of blocks flushed (it is not
					guaranteed that the actual
					number is that big, though) */
	ib_uint64_t	lsn_limit)	/*!< all blocks whose
					oldest_modification is smaller
					than this should be flushed (if
					their number does not exceed
					min_n), otherwise ignored */
{
	buf_page_t*	bpage;
	ulint		len;
	ulint		count = 0;

	ut_ad(buf_pool_mutex_own(buf_pool));

	do {
		/* If we have flushed enough, leave the loop */
		if (count >= min_n) {
			break;
		}

		/* Start from the end of the list looking for a suitable
		block to be flushed. */
		buf_flush_list_mutex_enter(buf_pool);

		/* We use len here because theoretically insertions can
		happen in the flush_list below while we are traversing
		it for a suitable candidate for flushing. We'd like to
		set a limit on how far we are willing to traverse
		the list. */
		len = UT_LIST_GET_LEN(buf_pool->flush_list);
		bpage = UT_LIST_GET_LAST(buf_pool->flush_list);

		if (bpage) {
			ut_a(bpage->oldest_modification > 0);
		}

		if (!bpage || bpage->oldest_modification >= lsn_limit) {
			/* We have flushed enough */
			buf_flush_list_mutex_exit(buf_pool);
			break;
		}

		ut_a(bpage->oldest_modification > 0);

		ut_ad(bpage->in_flush_list);

		buf_flush_list_mutex_exit(buf_pool);

		/* The list may change during the flushing and we cannot
		safely preserve within this function a pointer to a
		block in the list! */
		while (bpage != NULL
		       && !buf_flush_page_and_try_neighbors(
				bpage, BUF_FLUSH_LIST, min_n, &count)) {

			buf_flush_list_mutex_enter(buf_pool);

			/* If we are here that means that buf_pool->mutex
			was not released in buf_flush_page_and_try_neighbors()
			above and this guarantees that bpage didn't get
			relocated since we released the flush_list
			mutex above. There is a chance, however, that
			the bpage got removed from flush_list (not
			currently possible because flush_list_remove()
			also obtains buf_pool mutex but that may change
			in future). To avoid this scenario we check
			the oldest_modification and if it is zero
			we start all over again. */
			if (bpage->oldest_modification == 0) {
				buf_flush_list_mutex_exit(buf_pool);
				break;
			}

			bpage = UT_LIST_GET_PREV(list, bpage);

			ut_ad(!bpage || bpage->in_flush_list);

			buf_flush_list_mutex_exit(buf_pool);

			--len;
		}

	} while (count < min_n && bpage != NULL && len > 0);

	ut_ad(buf_pool_mutex_own(buf_pool));

	return(count);
}
	ulint		min_n,		/*!< in: wished minimum number of blocks
					flushed (it is not guaranteed that the
					actual number is that big, though) */
	ib_uint64_t	lsn_limit)	/*!< in: in the case of BUF_FLUSH_LIST
					all blocks whose oldest_modification is
					smaller than this should be flushed
					(if their number does not exceed
					min_n), otherwise ignored */
{
	ulint		count	= 0;

	ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
#ifdef UNIV_SYNC_DEBUG
	ut_ad((flush_type != BUF_FLUSH_LIST)
	      || sync_thread_levels_empty_gen(TRUE));
#endif /* UNIV_SYNC_DEBUG */

	buf_pool_mutex_enter(buf_pool);

	/* Note: The buffer pool mutex is released and reacquired within
	the flush functions. */
	switch (flush_type) {
	case BUF_FLUSH_LRU:
		count = buf_flush_LRU_list_batch(buf_pool, min_n);
		break;
	case BUF_FLUSH_LIST:
		count = buf_flush_flush_list_batch(buf_pool, min_n, lsn_limit);
		break;
	}

	buf_pool_mutex_exit(buf_pool);

	buf_flush_buffered_writes();

#ifdef UNIV_DEBUG
	if (buf_debug_prints && count > 0) {
		fprintf(stderr, flush_type == BUF_FLUSH_LRU
			? "Flushed %lu pages in LRU flush\n"
			: "Flushed %lu pages in flush list flush\n",
			(ulong) count);
	}
#endif /* UNIV_DEBUG */

	srv_buf_pool_flushed += count;

	return(count);
}

/******************************************************************//**
Gather the aggregated stats for both flush list and LRU list flushing */
buf_flush_common(
/*=============*/
	enum buf_flush	flush_type,	/*!< in: type of flush */
	ulint		page_count)	/*!< in: number of pages flushed */
{
	buf_flush_buffered_writes();

	ut_a(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);

#ifdef UNIV_DEBUG
	if (buf_debug_prints && page_count > 0) {
		fprintf(stderr, flush_type == BUF_FLUSH_LRU
			? "Flushed %lu pages in LRU flush\n"
			: "Flushed %lu pages in flush list flush\n",
			(ulong) page_count);
	}
#endif /* UNIV_DEBUG */

	srv_buf_pool_flushed += page_count;

	if (flush_type == BUF_FLUSH_LRU) {
		/* We keep track of all flushes happening as part of LRU
		flush. When estimating the desired rate at which flush_list
		should be flushed we factor in this value. */
		buf_lru_flush_page_count += page_count;
	}
}
/******************************************************************//**
Start a buffer flush batch for LRU or flush list */
buf_flush_start(
/*============*/
	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
	enum buf_flush	flush_type)	/*!< in: BUF_FLUSH_LRU
					or BUF_FLUSH_LIST */
{
	buf_pool_mutex_enter(buf_pool);

	if (buf_pool->n_flush[flush_type] > 0
	    || buf_pool->init_flush[flush_type] == TRUE) {

		/* There is already a flush batch of the same type running */

		buf_pool_mutex_exit(buf_pool);

		return(FALSE);
	}

	buf_pool->init_flush[flush_type] = TRUE;

	buf_pool_mutex_exit(buf_pool);

	return(TRUE);
}

/******************************************************************//**
End a buffer flush batch for LRU or flush list */
buf_flush_end(
/*==========*/
	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
	enum buf_flush	flush_type)	/*!< in: BUF_FLUSH_LRU
					or BUF_FLUSH_LIST */
{
	buf_pool_mutex_enter(buf_pool);

	buf_pool->init_flush[flush_type] = FALSE;

	if (buf_pool->n_flush[flush_type] == 0) {

		/* The running flush batch has ended */

		os_event_set(buf_pool->no_flush[flush_type]);
	}

	buf_pool_mutex_exit(buf_pool);
}
/******************************************************************//**
Waits until a flush batch of the given type ends */
buf_flush_wait_batch_end(
/*=====================*/
	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
	enum buf_flush	type)		/*!< in: BUF_FLUSH_LRU
					or BUF_FLUSH_LIST */
{
	ut_ad(type == BUF_FLUSH_LRU || type == BUF_FLUSH_LIST);

	if (buf_pool == NULL) {
		ulint	i;

		for (i = 0; i < srv_buf_pool_instances; ++i) {
			buf_pool_t*	i_buf_pool = buf_pool_from_array(i);

			os_event_wait(i_buf_pool->no_flush[type]);
		}
	} else {
		os_event_wait(buf_pool->no_flush[type]);
	}
}
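
/* NOTE (editorial illustration, not part of the original source): passing
buf_pool == NULL waits for the batch of the given type to end in every
buffer pool instance, while passing a specific instance waits only on that
instance's no_flush[] event. */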
/*******************************************************************//**
This utility flushes dirty blocks from the end of the LRU list.
NOTE: The calling thread may own latches to pages: to avoid deadlocks,
this function must be written so that it cannot end up waiting for these
latches!
@return number of blocks for which the write request was queued;
ULINT_UNDEFINED if there was a flush of the same type already running */
buf_flush_LRU(
/*==========*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	ulint		min_n)		/*!< in: wished minimum number of blocks
					flushed (it is not guaranteed that the
					actual number is that big, though) */
{
	ulint		page_count;

	if (!buf_flush_start(buf_pool, BUF_FLUSH_LRU)) {
		return(ULINT_UNDEFINED);
	}

	page_count = buf_flush_batch(buf_pool, BUF_FLUSH_LRU, min_n, 0);

	buf_flush_end(buf_pool, BUF_FLUSH_LRU);

	buf_flush_common(BUF_FLUSH_LRU, page_count);

	return(page_count);
}
/*******************************************************************//**
This utility flushes dirty blocks from the end of the flush list of
all buffer pool instances.
NOTE: The calling thread is not allowed to own any latches on pages!
@return number of blocks for which the write request was queued;
ULINT_UNDEFINED if there was a flush of the same type already running */
	ulint		min_n,		/*!< in: wished minimum number of blocks
					flushed (it is not guaranteed that the
					actual number is that big, though) */
	ib_uint64_t	lsn_limit)	/*!< in the case BUF_FLUSH_LIST all
					blocks whose oldest_modification is
					smaller than this should be flushed
					(if their number does not exceed
					min_n), otherwise ignored */
{
	ulint		i;
	ulint		total_page_count = 0;
	ibool		skipped = FALSE;

	if (min_n != ULINT_MAX) {
		/* Ensure that flushing is spread evenly amongst the
		buffer pool instances. When min_n is ULINT_MAX
		we need to flush everything up to the lsn limit
		so no limit here. */
		min_n = (min_n + srv_buf_pool_instances - 1)
			/ srv_buf_pool_instances;
	}
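
	/* NOTE (editorial illustration, numbers assumed, not part of the
	original source): this is a ceiling division.  With min_n = 200 and
	srv_buf_pool_instances = 3, each instance is asked to flush
	(200 + 2) / 3 = 67 pages, so the instances together flush at least
	the requested 200 pages. */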
	/* Flush to lsn_limit in all buffer pool instances */
	for (i = 0; i < srv_buf_pool_instances; i++) {
		buf_pool_t*	buf_pool;
		ulint		page_count = 0;

		buf_pool = buf_pool_from_array(i);

		if (!buf_flush_start(buf_pool, BUF_FLUSH_LIST)) {
			/* We have two choices here. If lsn_limit was
			specified then skipping an instance of buffer
			pool means we cannot guarantee that all pages
			up to lsn_limit have been flushed. We can
			return right now with failure or we can try
			to flush remaining buffer pools up to the
			lsn_limit. We attempt to flush other buffer
			pools based on the assumption that it will
			help in the retry which will follow the
			failure. */
			skipped = TRUE;

			continue;
		}

		page_count = buf_flush_batch(
			buf_pool, BUF_FLUSH_LIST, min_n, lsn_limit);

		buf_flush_end(buf_pool, BUF_FLUSH_LIST);

		buf_flush_common(BUF_FLUSH_LIST, page_count);

		total_page_count += page_count;
	}

	return(lsn_limit != IB_ULONGLONG_MAX && skipped
	       ? ULINT_UNDEFINED : total_page_count);
}
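
/* NOTE (editorial illustration, not part of the original source): when the
caller passed a real lsn_limit (not IB_ULONGLONG_MAX) and at least one buffer
pool instance had to be skipped because another flush batch was already
running there, ULINT_UNDEFINED is returned so the caller knows the lsn_limit
guarantee does not hold and may retry; otherwise the total number of queued
page writes is returned. */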
/******************************************************************//**
Gives a recommendation of how many blocks should be flushed to establish
a big enough margin of replaceable blocks near the end of the LRU list
and in the free list. */

immediately, without waiting. */
buf_flush_free_margin(
/*==================*/
	buf_pool_t*	buf_pool)	/*!< in: Buffer pool instance */
{
	ulint	n_to_flush;

	n_to_flush = buf_flush_LRU_recommendation(buf_pool);

	if (n_to_flush > 0) {
		ulint	n_flushed;

		n_flushed = buf_flush_LRU(buf_pool, n_to_flush);

		if (n_flushed == ULINT_UNDEFINED) {
			/* There was an LRU type flush batch already running;
			let us wait for it to end */

			buf_flush_wait_batch_end(buf_pool, BUF_FLUSH_LRU);
		}
	}
}
/*********************************************************************//**
Flushes pages from the end of all the LRU lists. */
buf_flush_free_margins(void)
/*========================*/
{
	ulint	i;

	for (i = 0; i < srv_buf_pool_instances; i++) {
		buf_pool_t*	buf_pool;

		buf_pool = buf_pool_from_array(i);

		buf_flush_free_margin(buf_pool);
	}
}
/*********************************************************************
Update the historical stats that we are collecting for flush rate
heuristics at the end of each interval.