buf_flush_insert_into_flush_list(
/*=============================*/
	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
	buf_block_t*	block,		/*!< in/out: block which is modified */
	ib_uint64_t	lsn)		/*!< in: oldest modification */
{
	ut_ad(!buf_pool_mutex_own(buf_pool));
	ut_ad(log_flush_order_mutex_own());
	ut_ad(mutex_own(&block->mutex));

	buf_flush_list_mutex_enter(buf_pool);

	ut_ad((UT_LIST_GET_FIRST(buf_pool->flush_list) == NULL)
	      || (UT_LIST_GET_FIRST(buf_pool->flush_list)->oldest_modification
		  <= lsn));

	/* If we are in the recovery then we need to update the flush
	red-black tree as well. */
	if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
		buf_flush_list_mutex_exit(buf_pool);
		buf_flush_insert_sorted_into_flush_list(buf_pool, block, lsn);

		return;
	}

	ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
	ut_ad(block->page.in_LRU_list);
	ut_ad(block->page.in_page_hash);
	ut_ad(!block->page.in_zip_hash);
	ut_ad(!block->page.in_flush_list);
	ut_d(block->page.in_flush_list = TRUE);
	block->page.oldest_modification = lsn;
	UT_LIST_ADD_FIRST(list, buf_pool->flush_list, &block->page);

#ifdef UNIV_DEBUG_VALGRIND
	{
		ulint	zip_size = buf_block_get_zip_size(block);

		if (UNIV_UNLIKELY(zip_size)) {
			UNIV_MEM_ASSERT_RW(block->page.zip.data, zip_size);
		} else {
			UNIV_MEM_ASSERT_RW(block->frame, UNIV_PAGE_SIZE);
		}
	}
#endif /* UNIV_DEBUG_VALGRIND */
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
	ut_a(buf_flush_validate_low(buf_pool));
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */

	buf_flush_list_mutex_exit(buf_pool);
}
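/* Illustrative sketch, not part of buf0flu.c: because every non-recovery
insert goes to the front of the flush list with an lsn at least as large as
the current head (the ut_ad() above), the list stays sorted in descending
order of oldest_modification.  A stand-alone model of that invariant, using
the file's own ulint/ib_uint64_t types and a plain array whose index 0
plays the role of the list head; all names here are hypothetical. */

#define FLUSH_MODEL_MAX	16	/* capacity of the toy list, illustration only */

typedef struct {
	ib_uint64_t	lsn[FLUSH_MODEL_MAX];	/* lsn[0] is the list head */
	ulint		len;
} flush_model_t;

static void
flush_model_add_first(
/*==================*/
	flush_model_t*	model,	/*!< in/out: toy flush list */
	ib_uint64_t	lsn)	/*!< in: oldest modification of new entry */
{
	ulint	i;

	/* Same check as the ut_ad() in buf_flush_insert_into_flush_list():
	the new entry must not be older than the current head. */
	ut_a(model->len == 0 || model->lsn[0] <= lsn);
	ut_a(model->len < FLUSH_MODEL_MAX);

	for (i = model->len; i > 0; i--) {
		model->lsn[i] = model->lsn[i - 1];
	}

	model->lsn[0] = lsn;
	model->len++;
}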
/********************************************************************//**
Inserts a modified block into the flush list in the right sorted position.
This function is used by recovery, because there the modifications do not
necessarily come in the order of lsn's. */
buf_flush_insert_sorted_into_flush_list(
/*====================================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	buf_block_t*	block,		/*!< in/out: block which is modified */
	ib_uint64_t	lsn)		/*!< in: oldest modification */
{
	buf_page_t*	prev_b;

	ut_ad(!buf_pool_mutex_own(buf_pool));
	ut_ad(log_flush_order_mutex_own());
	ut_ad(mutex_own(&block->mutex));
	ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);

	buf_flush_list_mutex_enter(buf_pool);

	/* The field in_LRU_list is protected by buf_pool_mutex, which
	we are not holding. However, while a block is in the flush
	list, it is dirty and cannot be discarded, not from the
	page_hash or from the LRU list. At most, the uncompressed
	page frame of a compressed block may be discarded or created
	(copying the block->page to or from a buf_page_t that is
	dynamically allocated from buf_buddy_alloc()). Because those
	transitions hold block->mutex and the flush list mutex (via
	buf_flush_relocate_on_flush_list()), there is no possibility
	of a race condition in the assertions below. */
	ut_ad(block->page.in_LRU_list);
	ut_ad(block->page.in_page_hash);
	/* buf_buddy_block_register() will take a block in the
	BUF_BLOCK_MEMORY state, not a file page. */
	ut_ad(!block->page.in_zip_hash);

	ut_ad(!block->page.in_flush_list);
	ut_d(block->page.in_flush_list = TRUE);
	block->page.oldest_modification = lsn;

#ifdef UNIV_DEBUG_VALGRIND
	{
		ulint	zip_size = buf_block_get_zip_size(block);

		if (UNIV_UNLIKELY(zip_size)) {
			UNIV_MEM_ASSERT_RW(block->page.zip.data, zip_size);
		} else {
			UNIV_MEM_ASSERT_RW(block->frame, UNIV_PAGE_SIZE);
		}
	}
#endif /* UNIV_DEBUG_VALGRIND */
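/* Illustrative sketch, not part of buf0flu.c: the remainder of the sorted
insert (not shown in this fragment) has to find the first flush list entry
whose oldest_modification is not larger than the new lsn and link the block
in front of it, so that the list stays ordered even when recovery applies
modifications out of lsn order.  A stand-alone model over a singly linked
list of lsn values; all names here are hypothetical. */

typedef struct flush_sort_node_struct	flush_sort_node_t;
struct flush_sort_node_struct {
	ib_uint64_t		lsn;	/* oldest modification of this entry */
	flush_sort_node_t*	next;	/* next (older or equal) entry */
};

static void
flush_sort_model_insert(
/*====================*/
	flush_sort_node_t**	head,	/*!< in/out: head of the toy list,
					sorted in descending lsn order */
	flush_sort_node_t*	node)	/*!< in/out: entry to insert */
{
	flush_sort_node_t**	link = head;

	/* Skip all entries that are newer than the one being inserted. */
	while (*link != NULL && (*link)->lsn > node->lsn) {
		link = &(*link)->next;
	}

	node->next = *link;
	*link = node;
}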
buf_flush_try_neighbors(
/*====================*/
	ulint		space,		/*!< in: space id */
	ulint		offset,		/*!< in: page offset */
	enum buf_flush	flush_type,	/*!< in: BUF_FLUSH_LRU or
					BUF_FLUSH_LIST */
	ulint		n_flushed,	/*!< in: number of pages
					flushed so far in this batch */
	ulint		n_to_flush)	/*!< in: maximum number of pages
					we are allowed to flush */
{
	ulint		i;
	ulint		low;
	ulint		high;
	ulint		count = 0;
	buf_pool_t*	buf_pool = buf_pool_get(space, offset);

	ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);

	if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN) {
		/* If there is little space, it is better not to flush
		any block except from the end of the LRU list */

		low = offset;
		high = offset + 1;
	} else {
		/* When flushed, dirty blocks are searched in
		neighborhoods of this size, and flushed along with the
		original page. */

		ulint	buf_flush_area;

		buf_flush_area	= ut_min(
			BUF_READ_AHEAD_AREA(buf_pool),
			buf_pool->curr_size / 16);

		low = (offset / buf_flush_area) * buf_flush_area;
		high = (offset / buf_flush_area + 1) * buf_flush_area;
	}
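	/* Worked example, illustration only: if BUF_READ_AHEAD_AREA(buf_pool)
	and buf_pool->curr_size / 16 yield buf_flush_area = 64, then a request
	for offset = 130 gives low = (130 / 64) * 64 = 128 and
	high = (130 / 64 + 1) * 64 = 192, i.e. the dirty neighbors considered
	for flushing are the pages in [128, 192) of the same tablespace. */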
		if (buf_flush_ready_for_flush(bpage, flush_type)
		    && (i == offset || !bpage->buf_fix_count)) {
			/* We only try to flush those
			neighbors != offset where the buf fix
			count is zero, as we then know that we
			probably can latch the page without a
			semaphore wait. Semaphore waits are
			expensive because we must flush the
			doublewrite buffer before we start
			the write. */

			buf_flush_page(buf_pool, bpage, flush_type);
			ut_ad(!mutex_own(block_mutex));
			ut_ad(!buf_pool_mutex_own(buf_pool));
			count++;

			continue;
		} else {
			mutex_exit(block_mutex);
		}

		buf_pool_mutex_exit(buf_pool);
/********************************************************************//**
Check if the block is modified and ready for flushing. If the block
is ready to flush then flush the page and try to flush its neighbors.
@return TRUE if buf_pool mutex was not released during this function.
This does not guarantee that some pages were written as well.
The number of pages written is added to the count. */
buf_flush_page_and_try_neighbors(
/*=============================*/
	buf_page_t*	bpage,		/*!< in: buffer control block,
					buf_page_in_file(bpage) */
	enum buf_flush	flush_type,	/*!< in: BUF_FLUSH_LRU
					or BUF_FLUSH_LIST */
	ulint		n_to_flush,	/*!< in: number of pages to
					flush */
	ulint*		count)		/*!< in/out: number of pages
					flushed */
{
	mutex_t*	block_mutex;
	ibool		flushed = FALSE;
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
#endif /* UNIV_DEBUG */

	ut_ad(buf_pool_mutex_own(buf_pool));

	block_mutex = buf_page_get_mutex(bpage);
	mutex_enter(block_mutex);

	ut_a(buf_page_in_file(bpage));

	if (buf_flush_ready_for_flush(bpage, flush_type)) {
		ulint		space;
		ulint		offset;
		buf_pool_t*	buf_pool;

		buf_pool = buf_pool_from_bpage(bpage);

		buf_pool_mutex_exit(buf_pool);

		/* These fields are protected by both the
		buffer pool mutex and block mutex. */
		space = buf_page_get_space(bpage);
		offset = buf_page_get_page_no(bpage);

		mutex_exit(block_mutex);

		/* Try to flush also all the neighbors */
		*count += buf_flush_try_neighbors(space,
						  offset,
						  flush_type,
						  *count,
						  n_to_flush);

		buf_pool_mutex_enter(buf_pool);
		flushed = TRUE;
	} else {
		mutex_exit(block_mutex);
	}

	ut_ad(buf_pool_mutex_own(buf_pool));

	return(flushed);
}
/*******************************************************************//**
This utility flushes dirty blocks from the end of the LRU list.
In the case of an LRU flush the calling thread may own latches to
pages: to avoid deadlocks, this function must be written so that it
cannot end up waiting for these latches!
@return number of blocks for which the write request was queued. */
buf_flush_LRU_list_batch(
/*=====================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	ulint		max)		/*!< in: max of blocks to flush */
{
	buf_page_t*	bpage;
	ulint		count = 0;

	ut_ad(buf_pool_mutex_own(buf_pool));

	do {
		/* Start from the end of the list looking for a
		suitable block to be flushed. */
		bpage = UT_LIST_GET_LAST(buf_pool->LRU);

		/* Iterate backwards over the LRU list till we find
		a page that isn't ready for flushing. */
		while (bpage != NULL
		       && !buf_flush_page_and_try_neighbors(
				bpage, BUF_FLUSH_LRU, max, &count)) {

			bpage = UT_LIST_GET_PREV(LRU, bpage);
		}
	} while (bpage != NULL && count < max);

	/* We keep track of all flushes happening as part of LRU
	flush. When estimating the desired rate at which flush_list
	should be flushed, we factor in this value. */
	buf_lru_flush_page_count += count;

	ut_ad(buf_pool_mutex_own(buf_pool));

	return(count);
}
/*******************************************************************//**
This utility flushes dirty blocks from the end of the flush_list.
The calling thread is not allowed to own any latches on pages!
@return number of blocks for which the write request was queued;
ULINT_UNDEFINED if there was a flush of the same type already running */
buf_flush_flush_list_batch(
/*=======================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	ulint		min_n,		/*!< in: wished minimum number
					of blocks flushed (it is not
					guaranteed that the actual
					number is that big, though) */
	ib_uint64_t	lsn_limit)	/*!< all blocks whose
					oldest_modification is smaller
					than this should be flushed (if
					their number does not exceed
					min_n), otherwise ignored */
{
	ulint		len;
	buf_page_t*	bpage;
	ulint		count = 0;

	ut_ad(buf_pool_mutex_own(buf_pool));

	/* If we have flushed enough, leave the loop */
	do {
		/* Start from the end of the list looking for a suitable
		block to be flushed. */

		buf_flush_list_mutex_enter(buf_pool);

		/* We use len here because theoretically insertions can
		happen in the flush_list below while we are traversing
		it for a suitable candidate for flushing. We'd like to
		set a limit on how far we are willing to traverse the
		list. */
		len = UT_LIST_GET_LEN(buf_pool->flush_list);
		bpage = UT_LIST_GET_LAST(buf_pool->flush_list);

		if (bpage) {
			ut_a(bpage->oldest_modification > 0);
		}

		if (!bpage || bpage->oldest_modification >= lsn_limit) {

			/* We have flushed enough */
			buf_flush_list_mutex_exit(buf_pool);
			break;
		}

		ut_a(bpage->oldest_modification > 0);

		ut_ad(bpage->in_flush_list);

		buf_flush_list_mutex_exit(buf_pool);

		/* The list may change during the flushing and we cannot
		safely preserve within this function a pointer to a
		block in the list! */
		while (bpage != NULL
		       && !buf_flush_page_and_try_neighbors(
				bpage, BUF_FLUSH_LIST, min_n, &count)) {

			buf_flush_list_mutex_enter(buf_pool);

			/* If we are here that means that buf_pool->mutex
			was not released in buf_flush_page_and_try_neighbors()
			above and this guarantees that bpage didn't get
			relocated since we released the flush_list
			mutex above. There is a chance, however, that
			the bpage got removed from flush_list (not
			currently possible because flush_list_remove()
			also obtains buf_pool mutex but that may change
			in future). To avoid this scenario we check
			the oldest_modification and if it is zero
			we start all over again. */
			if (bpage->oldest_modification == 0) {
				buf_flush_list_mutex_exit(buf_pool);
				break;
			}

			bpage = UT_LIST_GET_PREV(list, bpage);

			ut_ad(!bpage || bpage->in_flush_list);

			buf_flush_list_mutex_exit(buf_pool);

			--len;
		}

	} while (count < min_n && bpage != NULL && len > 0);

	ut_ad(buf_pool_mutex_own(buf_pool));

	return(count);
}
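/* Illustrative sketch, not part of buf0flu.c: a much simplified model of
the flush_list scan above.  An array stands in for the flush list, with the
highest index as the tail (the oldest modification); "flushing" a page just
clears its oldest_modification.  Unlike the real code, which restarts from
the list end because the list links may have changed while the flush list
mutex was released, this model simply skips entries that are already clean.
All names here are hypothetical. */

typedef struct {
	ib_uint64_t	oldest_modification;	/* 0 == page is clean */
} flush_list_model_page_t;

static ulint
flush_list_model_batch(
/*===================*/
	flush_list_model_page_t*	pages,		/*!< in/out: toy list,
							index len - 1 is the
							tail */
	ulint				len,		/*!< in: list length */
	ulint				min_n,		/*!< in: stop after this
							many pages */
	ib_uint64_t			lsn_limit)	/*!< in: flush only pages
							older than this lsn */
{
	ulint	count = 0;
	ulint	i;

	for (i = len; i > 0 && count < min_n; i--) {
		flush_list_model_page_t*	page = &pages[i - 1];

		if (page->oldest_modification == 0) {
			/* Analogue of the oldest_modification == 0 check
			above: the page already left the flush list. */
			continue;
		}

		if (page->oldest_modification >= lsn_limit) {
			/* Everything older than lsn_limit is flushed. */
			break;
		}

		page->oldest_modification = 0;	/* "flush" the page */
		count++;
	}

	return(count);
}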
buf_flush_batch(
/*============*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	enum buf_flush	flush_type,	/*!< in: BUF_FLUSH_LRU or
					BUF_FLUSH_LIST */
	ulint		min_n,		/*!< in: wished minimum number of blocks
					flushed (it is not guaranteed that the
					actual number is that big, though) */
	ib_uint64_t	lsn_limit)	/*!< in: in the case of BUF_FLUSH_LIST
					all blocks whose oldest_modification is
					smaller than this should be flushed
					(if their number does not exceed
					min_n), otherwise ignored */
{
	ulint		count	= 0;

	ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
#ifdef UNIV_SYNC_DEBUG
	ut_ad((flush_type != BUF_FLUSH_LIST)
	      || sync_thread_levels_empty_gen(TRUE));
#endif /* UNIV_SYNC_DEBUG */

	buf_pool_mutex_enter(buf_pool);

	/* Note: The buffer pool mutex is released and reacquired within
	the flush functions. */
	switch (flush_type) {
	case BUF_FLUSH_LRU:
		count = buf_flush_LRU_list_batch(buf_pool, min_n);
		break;
	case BUF_FLUSH_LIST:
		count = buf_flush_flush_list_batch(buf_pool, min_n, lsn_limit);
		break;
	default:
		ut_error;
	}

	buf_pool_mutex_exit(buf_pool);

	buf_flush_buffered_writes();

#ifdef UNIV_DEBUG
	if (buf_debug_prints && count > 0) {
		fprintf(stderr, flush_type == BUF_FLUSH_LRU
			? "Flushed %lu pages in LRU flush\n"
			: "Flushed %lu pages in flush list flush\n",
			(ulong) count);
	}
#endif /* UNIV_DEBUG */

	srv_buf_pool_flushed += count;

	return(count);
}
/******************************************************************//**
Gather the aggregated stats for both flush list and LRU list flushing */
buf_flush_common(
/*=============*/
	enum buf_flush	flush_type,	/*!< in: type of flush */
	ulint		page_count)	/*!< in: number of pages flushed */
{
	buf_flush_buffered_writes();

	ut_a(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
#ifdef UNIV_DEBUG
	if (buf_debug_prints && page_count > 0) {
		fprintf(stderr, flush_type == BUF_FLUSH_LRU
			? "Flushed %lu pages in LRU flush\n"
			: "Flushed %lu pages in flush list flush\n",
			(ulong) page_count);
	}
#endif /* UNIV_DEBUG */

	srv_buf_pool_flushed += page_count;

	if (flush_type == BUF_FLUSH_LRU) {
		/* We keep track of all flushes happening as part of LRU
		flush. When estimating the desired rate at which flush_list
		should be flushed we factor in this value. */
		buf_lru_flush_page_count += page_count;
	}
}
/******************************************************************//**
Start a buffer flush batch for LRU or flush list */
buf_flush_start(
/*============*/
	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
	enum buf_flush	flush_type)	/*!< in: BUF_FLUSH_LRU
					or BUF_FLUSH_LIST */
{
	buf_pool_mutex_enter(buf_pool);

	if (buf_pool->n_flush[flush_type] > 0
	    || buf_pool->init_flush[flush_type] == TRUE) {

		/* There is already a flush batch of the same type running */

		buf_pool_mutex_exit(buf_pool);

		return(FALSE);
	}

	buf_pool->init_flush[flush_type] = TRUE;

	buf_pool_mutex_exit(buf_pool);

	return(TRUE);
}

/******************************************************************//**
End a buffer flush batch for LRU or flush list */
buf_flush_end(
/*==========*/
	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
	enum buf_flush	flush_type)	/*!< in: BUF_FLUSH_LRU
					or BUF_FLUSH_LIST */
{
	buf_pool_mutex_enter(buf_pool);

	buf_pool->init_flush[flush_type] = FALSE;

	if (buf_pool->n_flush[flush_type] == 0) {

		/* The running flush batch has ended */

		os_event_set(buf_pool->no_flush[flush_type]);
	}

	buf_pool_mutex_exit(buf_pool);
}
/******************************************************************//**
Waits until a flush batch of the given type ends */
buf_flush_wait_batch_end(
/*=====================*/
	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
	enum buf_flush	type)		/*!< in: BUF_FLUSH_LRU
					or BUF_FLUSH_LIST */
{
	ut_ad(type == BUF_FLUSH_LRU || type == BUF_FLUSH_LIST);

	if (buf_pool == NULL) {
		ulint	i;

		for (i = 0; i < srv_buf_pool_instances; ++i) {
			buf_pool_t*	buf_pool;

			buf_pool = buf_pool_from_array(i);

			os_event_wait(buf_pool->no_flush[type]);
		}
	} else {
		os_event_wait(buf_pool->no_flush[type]);
	}
}
/*******************************************************************//**
This utility flushes dirty blocks from the end of the LRU list.
NOTE: The calling thread may own latches to pages: to avoid deadlocks,
this function must be written so that it cannot end up waiting for these
latches!
@return number of blocks for which the write request was queued;
ULINT_UNDEFINED if there was a flush of the same type already running */
buf_flush_LRU(
/*==========*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	ulint		min_n)		/*!< in: wished minimum number of blocks
					flushed (it is not guaranteed that the
					actual number is that big, though) */
{
	ulint		page_count;

	if (!buf_flush_start(buf_pool, BUF_FLUSH_LRU)) {
		return(ULINT_UNDEFINED);
	}

	page_count = buf_flush_batch(buf_pool, BUF_FLUSH_LRU, min_n, 0);

	buf_flush_end(buf_pool, BUF_FLUSH_LRU);

	buf_flush_common(BUF_FLUSH_LRU, page_count);

	return(page_count);
}
/*******************************************************************//**
This utility flushes dirty blocks from the end of the flush list of
all buffer pool instances.
NOTE: The calling thread is not allowed to own any latches on pages!
@return number of blocks for which the write request was queued;
ULINT_UNDEFINED if there was a flush of the same type already running */
buf_flush_list(
/*===========*/
	ulint		min_n,		/*!< in: wished minimum number of blocks
					flushed (it is not guaranteed that the
					actual number is that big, though) */
	ib_uint64_t	lsn_limit)	/*!< in the case BUF_FLUSH_LIST all
					blocks whose oldest_modification is
					smaller than this should be flushed
					(if their number does not exceed
					min_n), otherwise ignored */
{
	ulint		i;
	ulint		total_page_count = 0;
	ibool		skipped = FALSE;

	if (min_n != ULINT_MAX) {
		/* Ensure that flushing is spread evenly amongst the
		buffer pool instances. When min_n is ULINT_MAX
		we need to flush everything up to the lsn limit
		so no limit here. */
		min_n = (min_n + srv_buf_pool_instances - 1)
			/ srv_buf_pool_instances;
	}
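	/* Worked example, illustration only: the rounding above is a ceiling
	division, so the per-instance quota times the number of instances is
	never less than the requested total.  With min_n = 100 and
	srv_buf_pool_instances = 8 it gives (100 + 8 - 1) / 8 = 13 pages per
	instance, and 13 * 8 = 104 >= 100, whereas plain integer division
	would allow only 12 * 8 = 96 pages in total. */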
	/* Flush to lsn_limit in all buffer pool instances */
	for (i = 0; i < srv_buf_pool_instances; i++) {
		buf_pool_t*	buf_pool;
		ulint		page_count = 0;

		buf_pool = buf_pool_from_array(i);

		if (!buf_flush_start(buf_pool, BUF_FLUSH_LIST)) {
			/* We have two choices here. If lsn_limit was
			specified then skipping an instance of buffer
			pool means we cannot guarantee that all pages
			up to lsn_limit have been flushed. We can
			return right now with failure or we can try
			to flush remaining buffer pools up to the
			lsn_limit. We attempt to flush other buffer
			pools based on the assumption that it will
			help in the retry which will follow the
			failure. */
			skipped = TRUE;

			continue;
		}

		page_count = buf_flush_batch(
			buf_pool, BUF_FLUSH_LIST, min_n, lsn_limit);

		buf_flush_end(buf_pool, BUF_FLUSH_LIST);

		buf_flush_common(BUF_FLUSH_LIST, page_count);

		total_page_count += page_count;
	}

	return(lsn_limit != IB_ULONGLONG_MAX && skipped
	       ? ULINT_UNDEFINED : total_page_count);
}
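/* Illustrative usage sketch, not part of buf0flu.c: a hypothetical caller
of buf_flush_list() above that wants everything up to lsn_limit on disk can
treat ULINT_UNDEFINED as "some instance was busy" and retry after waiting
for the running batch to end (passing NULL waits on every instance, as in
buf_flush_wait_batch_end() above).  The name and retry policy here are
illustration only. */
static void
buf_flush_list_model_sync(
/*======================*/
	ib_uint64_t	lsn_limit)	/*!< in: flush everything up to this */
{
	for (;;) {
		ulint	n_flushed = buf_flush_list(ULINT_MAX, lsn_limit);

		if (n_flushed != ULINT_UNDEFINED) {
			/* Every instance accepted the batch. */
			break;
		}

		/* A flush batch of the same type was already running in
		some instance; wait for it to end and retry. */
		buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST);
	}
}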
/******************************************************************//**
Gives a recommendation of how many blocks should be flushed to establish
a big enough margin of replaceable blocks near the end of the LRU list
immediately, without waiting. */
buf_flush_free_margin(
/*==================*/
	buf_pool_t*	buf_pool)	/*!< in: Buffer pool instance */
{
	ulint	n_to_flush;
	ulint	n_flushed;

	n_to_flush = buf_flush_LRU_recommendation(buf_pool);

	if (n_to_flush > 0) {
		n_flushed = buf_flush_LRU(buf_pool, n_to_flush);

		if (n_flushed == ULINT_UNDEFINED) {
			/* There was an LRU type flush batch already running;
			let us wait for it to end */

			buf_flush_wait_batch_end(buf_pool, BUF_FLUSH_LRU);
		}
	}
}
/*********************************************************************//**
Flushes pages from the end of all the LRU lists. */
buf_flush_free_margins(void)
/*========================*/
{
	ulint	i;

	for (i = 0; i < srv_buf_pool_instances; i++) {
		buf_pool_t*	buf_pool;

		buf_pool = buf_pool_from_array(i);

		buf_flush_free_margin(buf_pool);
	}
}
/*********************************************************************
Update the historical stats that we are collecting for flush rate
heuristics at the end of each interval.