 buf_flush_insert_into_flush_list(
 /*=============================*/
-	buf_block_t*	block)	/*!< in/out: block which is modified */
+	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
+	buf_block_t*	block,		/*!< in/out: block which is modified */
+	ib_uint64_t	lsn)		/*!< in: oldest modification */

-	ut_ad(buf_pool_mutex_own());
+	ut_ad(!buf_pool_mutex_own(buf_pool));
+	ut_ad(log_flush_order_mutex_own());
+	ut_ad(mutex_own(&block->mutex));

+	buf_flush_list_mutex_enter(buf_pool);

 	ut_ad((UT_LIST_GET_FIRST(buf_pool->flush_list) == NULL)
 	      || (UT_LIST_GET_FIRST(buf_pool->flush_list)->oldest_modification
 		  <= block->page.oldest_modification));

 	/* If we are in the recovery then we need to update the flush
 	red-black tree as well. */
 	if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
-		buf_flush_insert_sorted_into_flush_list(block);
+		buf_flush_list_mutex_exit(buf_pool);
+		buf_flush_insert_sorted_into_flush_list(buf_pool, block, lsn);

 	ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
-	ut_ad(block->page.in_LRU_list);
-	ut_ad(block->page.in_page_hash);
-	ut_ad(!block->page.in_zip_hash);
 	ut_ad(!block->page.in_flush_list);

 	ut_d(block->page.in_flush_list = TRUE);
+	block->page.oldest_modification = lsn;
 	UT_LIST_ADD_FIRST(list, buf_pool->flush_list, &block->page);

+#ifdef UNIV_DEBUG_VALGRIND
+	ulint	zip_size = buf_block_get_zip_size(block);

+	if (UNIV_UNLIKELY(zip_size)) {
+		UNIV_MEM_ASSERT_RW(block->page.zip.data, zip_size);

+		UNIV_MEM_ASSERT_RW(block->frame, UNIV_PAGE_SIZE);

+#endif /* UNIV_DEBUG_VALGRIND */
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
-	ut_a(buf_flush_validate_low());
+	ut_a(buf_flush_validate_low(buf_pool));
 #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */

+	buf_flush_list_mutex_exit(buf_pool);
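/* In the rewritten function the caller is no longer expected to hold the
buffer pool mutex; it must instead hold the log flush-order mutex and the
block mutex, and the function takes the flush list mutex itself, as the
assertions and the buf_flush_list_mutex_enter() call above show. */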
 /********************************************************************//**

 buf_flush_insert_sorted_into_flush_list(
 /*====================================*/
-	buf_block_t*	block)	/*!< in/out: block which is modified */
+	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
+	buf_block_t*	block,		/*!< in/out: block which is modified */
+	ib_uint64_t	lsn)		/*!< in: oldest modification */

 	buf_page_t*	prev_b;

-	ut_ad(buf_pool_mutex_own());
+	ut_ad(!buf_pool_mutex_own(buf_pool));
+	ut_ad(log_flush_order_mutex_own());
+	ut_ad(mutex_own(&block->mutex));
 	ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);

+	buf_flush_list_mutex_enter(buf_pool);

+	/* The field in_LRU_list is protected by buf_pool_mutex, which
+	we are not holding. However, while a block is in the flush
+	list, it is dirty and cannot be discarded, neither from the
+	page_hash nor from the LRU list. At most, the uncompressed
+	page frame of a compressed block may be discarded or created
+	(copying the block->page to or from a buf_page_t that is
+	dynamically allocated from buf_buddy_alloc()). Because those
+	transitions hold block->mutex and the flush list mutex (via
+	buf_flush_relocate_on_flush_list()), there is no possibility
+	of a race condition in the assertions below. */
 	ut_ad(block->page.in_LRU_list);
 	ut_ad(block->page.in_page_hash);
+	/* buf_buddy_block_register() will take a block in the
+	BUF_BLOCK_MEMORY state, not a file page. */
 	ut_ad(!block->page.in_zip_hash);

 	ut_ad(!block->page.in_flush_list);
 	ut_d(block->page.in_flush_list = TRUE);
+	block->page.oldest_modification = lsn;

+#ifdef UNIV_DEBUG_VALGRIND
+	ulint	zip_size = buf_block_get_zip_size(block);

+	if (UNIV_UNLIKELY(zip_size)) {
+		UNIV_MEM_ASSERT_RW(block->page.zip.data, zip_size);

+		UNIV_MEM_ASSERT_RW(block->frame, UNIV_PAGE_SIZE);

+#endif /* UNIV_DEBUG_VALGRIND */

 		if (buf_flush_ready_for_flush(bpage, flush_type)
 		    && (i == offset || !bpage->buf_fix_count)) {
 			/* We only try to flush those
-			neighbors != offset where the buf fix count is
-			zero, as we then know that we probably can
-			latch the page without a semaphore wait.
-			Semaphore waits are expensive because we must
-			flush the doublewrite buffer before we start
+			neighbors != offset where the buf fix
+			count is zero, as we then know that we
+			probably can latch the page without a
+			semaphore wait. Semaphore waits are
+			expensive because we must flush the
+			doublewrite buffer before we start

-			buf_flush_page(bpage, flush_type);
+			buf_flush_page(buf_pool, bpage, flush_type);
 			ut_ad(!mutex_own(block_mutex));
+			ut_ad(!buf_pool_mutex_own(buf_pool));
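/* Both versions assert that buf_flush_page() returns without the block
mutex held; the new version additionally asserts that the buffer pool
mutex of the page's instance has been released as well. */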
-			buf_pool_mutex_enter();

 			mutex_exit(block_mutex);

-	buf_pool_mutex_exit();
+	buf_pool_mutex_exit(buf_pool);

+/********************************************************************//**
+Check if the block is modified and ready for flushing. If the block
+is ready to flush then flush the page and try to flush its neighbors.
+@return TRUE if the buf_pool mutex was released (and reacquired) during
+this function. This does not guarantee that any pages were actually
+written. The number of pages written is added to *count. */
+buf_flush_page_and_try_neighbors(
+/*=============================*/
+	buf_page_t*	bpage,		/*!< in: buffer control block,
+					buf_page_in_file(bpage) */
+	enum buf_flush	flush_type,	/*!< in: BUF_FLUSH_LRU
+					or BUF_FLUSH_LIST */
+	ulint*		count)		/*!< in/out: number of pages

+	mutex_t*	block_mutex;
+	ibool		flushed = FALSE;
+	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
+#endif /* UNIV_DEBUG */

+	ut_ad(buf_pool_mutex_own(buf_pool));

+	block_mutex = buf_page_get_mutex(bpage);
+	mutex_enter(block_mutex);

+	ut_a(buf_page_in_file(bpage));

+	if (buf_flush_ready_for_flush(bpage, flush_type)) {
+		buf_pool_t*	buf_pool;

+		buf_pool = buf_pool_from_bpage(bpage);

+		buf_pool_mutex_exit(buf_pool);

+		/* These fields are protected by both the
+		buffer pool mutex and block mutex. */
+		space = buf_page_get_space(bpage);
+		offset = buf_page_get_page_no(bpage);

+		mutex_exit(block_mutex);

+		/* Try to flush also all the neighbors */
+		*count += buf_flush_try_neighbors(space, offset, flush_type);

+		buf_pool_mutex_enter(buf_pool);

+		mutex_exit(block_mutex);

+	ut_ad(buf_pool_mutex_own(buf_pool));

+/*******************************************************************//**
+This utility flushes dirty blocks from the end of the LRU list.
+In the case of an LRU flush the calling thread may own latches to
+pages: to avoid deadlocks, this function must be written so that it
+cannot end up waiting for these latches!
+@return number of blocks for which the write request was queued. */
+buf_flush_LRU_list_batch(
+/*=====================*/
+	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
+	ulint		max)		/*!< in: max of blocks to flush */

+	ut_ad(buf_pool_mutex_own(buf_pool));

+		/* Start from the end of the list looking for a
+		suitable block to be flushed. */
+		bpage = UT_LIST_GET_LAST(buf_pool->LRU);

+		/* Iterate backwards over the LRU list till we find
+		a page that isn't ready for flushing. */
+		while (bpage != NULL
+		       && !buf_flush_page_and_try_neighbors(
+				bpage, BUF_FLUSH_LRU, &count)) {

+			bpage = UT_LIST_GET_PREV(LRU, bpage);

+	} while (bpage != NULL && count < max);
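/* Whenever buf_flush_page_and_try_neighbors() reports that it released
the buffer pool mutex, the inner while loop stops and the outer do-while
restarts the scan from the tail of the LRU list: the list may have
changed while the mutex was not held, so the saved bpage pointer cannot
be trusted any further. */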
+	/* We keep track of all flushes happening as part of LRU
+	flush. When estimating the desired rate at which flush_list
+	should be flushed, we factor in this value. */
+	buf_lru_flush_page_count += count;

+	ut_ad(buf_pool_mutex_own(buf_pool));

+/*******************************************************************//**
+This utility flushes dirty blocks from the end of the flush_list.
+The calling thread is not allowed to own any latches on pages!
+@return number of blocks for which the write request was queued;
+ULINT_UNDEFINED if there was a flush of the same type already
+buf_flush_flush_list_batch(
+/*=======================*/
+	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
+	ulint		min_n,		/*!< in: wished minimum number
+					of blocks flushed (it is not
+					guaranteed that the actual
+					number is that big, though) */
+	ib_uint64_t	lsn_limit)	/*!< all blocks whose
+					oldest_modification is smaller
+					than this should be flushed (if
+					their number does not exceed

+	ut_ad(buf_pool_mutex_own(buf_pool));

+		/* If we have flushed enough, leave the loop */

+		/* Start from the end of the list looking for a suitable
+		block to be flushed. */

+		buf_flush_list_mutex_enter(buf_pool);

+		/* We use len here because theoretically insertions can
+		happen in the flush_list below while we are traversing
+		it for a suitable candidate for flushing. We'd like to
+		set a limit on how far we are willing to traverse

+		len = UT_LIST_GET_LEN(buf_pool->flush_list);
+		bpage = UT_LIST_GET_LAST(buf_pool->flush_list);

+			ut_a(bpage->oldest_modification > 0);

+		if (!bpage || bpage->oldest_modification >= lsn_limit) {
+			/* We have flushed enough */
+			buf_flush_list_mutex_exit(buf_pool);

+		ut_a(bpage->oldest_modification > 0);

+		ut_ad(bpage->in_flush_list);

+		buf_flush_list_mutex_exit(buf_pool);

+		/* The list may change during the flushing and we cannot
+		safely preserve within this function a pointer to a
+		block in the list! */
+		while (bpage != NULL
+		       && !buf_flush_page_and_try_neighbors(
+				bpage, BUF_FLUSH_LIST, &count)) {

+			buf_flush_list_mutex_enter(buf_pool);

+			/* If we are here that means that buf_pool->mutex
+			was not released in buf_flush_page_and_try_neighbors()
+			above and this guarantees that bpage didn't get
+			relocated since we released the flush_list
+			mutex above. There is a chance, however, that
+			the bpage got removed from flush_list (not
+			currently possible because flush_list_remove()
+			also obtains buf_pool mutex but that may change
+			in future). To avoid this scenario we check
+			the oldest_modification and if it is zero
+			we start all over again. */
+			if (bpage->oldest_modification == 0) {
+				buf_flush_list_mutex_exit(buf_pool);

+			bpage = UT_LIST_GET_PREV(list, bpage);

+			ut_ad(!bpage || bpage->in_flush_list);

+			buf_flush_list_mutex_exit(buf_pool);

+	} while (count < min_n && bpage != NULL && len > 0);

+	ut_ad(buf_pool_mutex_own(buf_pool));
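/* The batch gives up once it has flushed min_n pages, run out of pages,
or visited len entries, where len was sampled before the scan started;
the len guard keeps the batch from chasing pages that are being inserted
at the head of the flush_list while it is still traversing. */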
 	ulint		min_n,		/*!< in: wished minimum number of blocks
 					flushed (it is not guaranteed that the
 					actual number is that big, though) */
-	ib_uint64_t	lsn_limit)	/*!< in the case BUF_FLUSH_LIST all
-					blocks whose oldest_modification is
+	ib_uint64_t	lsn_limit)	/*!< in: in the case of BUF_FLUSH_LIST
+					all blocks whose oldest_modification is
 					smaller than this should be flushed
 					(if their number does not exceed
 					min_n), otherwise ignored */

-	ulint		page_count = 0;
-	ulint		old_page_count;

-	ut_ad((flush_type == BUF_FLUSH_LRU)
-	      || (flush_type == BUF_FLUSH_LIST));
+	ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad((flush_type != BUF_FLUSH_LIST)
 	      || sync_thread_levels_empty_gen(TRUE));
 #endif /* UNIV_SYNC_DEBUG */

-	buf_pool_mutex_enter();

-	if ((buf_pool->n_flush[flush_type] > 0)
-	    || (buf_pool->init_flush[flush_type] == TRUE)) {

-		/* There is already a flush batch of the same type running */

-		buf_pool_mutex_exit();

-		return(ULINT_UNDEFINED);

-	buf_pool->init_flush[flush_type] = TRUE;

-	bool	done_with_loop = false;
-	for (; done_with_loop != true;) {

-		/* If we have flushed enough, leave the loop */
-		if (page_count >= min_n) {

-		/* Start from the end of the list looking for a suitable
-		block to be flushed. */

-		if (flush_type == BUF_FLUSH_LRU) {
-			bpage = UT_LIST_GET_LAST(buf_pool->LRU);

-			ut_ad(flush_type == BUF_FLUSH_LIST);

-			bpage = UT_LIST_GET_LAST(buf_pool->flush_list);

-		    || bpage->oldest_modification >= lsn_limit) {
-			/* We have flushed enough */

-		ut_ad(bpage->in_flush_list);

-		/* Note that after finding a single flushable page, we try to
-		flush also all its neighbors, and after that start from the
-		END of the LRU list or flush list again: the list may change
-		during the flushing and we cannot safely preserve within this
-		function a pointer to a block in the list! */

-			mutex_t*	block_mutex = buf_page_get_mutex(bpage);

-			ut_a(buf_page_in_file(bpage));

-			mutex_enter(block_mutex);
-			ready = buf_flush_ready_for_flush(bpage, flush_type);
-			mutex_exit(block_mutex);

-				space = buf_page_get_space(bpage);
-				offset = buf_page_get_page_no(bpage);

-				buf_pool_mutex_exit();

-				old_page_count = page_count;

-				/* Try to flush also all the neighbors */
-				page_count += buf_flush_try_neighbors(
-					space, offset, flush_type);

-				"Flush type %lu, page no %lu, neighb %lu\n",
-				page_count - old_page_count); */

-				buf_pool_mutex_enter();

-			} else if (flush_type == BUF_FLUSH_LRU) {
-				bpage = UT_LIST_GET_PREV(LRU, bpage);

-				ut_ad(flush_type == BUF_FLUSH_LIST);

-				bpage = UT_LIST_GET_PREV(list, bpage);
-				ut_ad(!bpage || bpage->in_flush_list);

-		} while (bpage != NULL);

-		/* If we could not find anything to flush, leave the loop */

-		done_with_loop = true;

-	buf_pool->init_flush[flush_type] = FALSE;

-	if (buf_pool->n_flush[flush_type] == 0) {

-		/* The running flush batch has ended */

-		os_event_set(buf_pool->no_flush[flush_type]);

-	buf_pool_mutex_exit();

-	buf_flush_buffered_writes();

+	buf_pool_mutex_enter(buf_pool);

+	/* Note: The buffer pool mutex is released and reacquired within
+	the flush functions. */
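/* Both helpers appear to keep the locking contract the note above
describes: they are entered with the buffer pool mutex held, may release
and reacquire it while flushing, and assert buf_pool_mutex_own() again
before they return. */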
+	switch (flush_type) {
+	case BUF_FLUSH_LRU:
+		count = buf_flush_LRU_list_batch(buf_pool, min_n);
+	case BUF_FLUSH_LIST:
+		count = buf_flush_flush_list_batch(buf_pool, min_n, lsn_limit);

+	buf_pool_mutex_exit(buf_pool);

+	buf_flush_buffered_writes();

+	if (buf_debug_prints && count > 0) {
+		fprintf(stderr, flush_type == BUF_FLUSH_LRU
+			? "Flushed %lu pages in LRU flush\n"
+			: "Flushed %lu pages in flush list flush\n",
+#endif /* UNIV_DEBUG */

+	srv_buf_pool_flushed += count;

+/******************************************************************//**
+Gather the aggregated stats for both flush list and LRU list flushing */
+	enum buf_flush	flush_type,	/*!< in: type of flush */
+	ulint		page_count)	/*!< in: number of pages flushed */

+	buf_flush_buffered_writes();

+	ut_a(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);

 #ifdef UNIV_DEBUG
 	if (buf_debug_prints && page_count > 0) {
-		ut_a(flush_type == BUF_FLUSH_LRU
-		     || flush_type == BUF_FLUSH_LIST);
 		fprintf(stderr, flush_type == BUF_FLUSH_LRU
 			? "Flushed %lu pages in LRU flush\n"
 			: "Flushed %lu pages in flush list flush\n",

 	srv_buf_pool_flushed += page_count;

-	/* We keep track of all flushes happening as part of LRU
-	flush. When estimating the desired rate at which flush_list
-	should be flushed we factor in this value. */
 	if (flush_type == BUF_FLUSH_LRU) {
+		/* We keep track of all flushes happening as part of LRU
+		flush. When estimating the desired rate at which flush_list
+		should be flushed we factor in this value. */
 		buf_lru_flush_page_count += page_count;

+/******************************************************************//**
+Start a buffer flush batch for LRU or flush list */
+	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
+	enum buf_flush	flush_type)	/*!< in: BUF_FLUSH_LRU
+					or BUF_FLUSH_LIST */

+	buf_pool_mutex_enter(buf_pool);

+	if (buf_pool->n_flush[flush_type] > 0
+	    || buf_pool->init_flush[flush_type] == TRUE) {

+		/* There is already a flush batch of the same type running */

+		buf_pool_mutex_exit(buf_pool);

+	buf_pool->init_flush[flush_type] = TRUE;

+	buf_pool_mutex_exit(buf_pool);

+/******************************************************************//**
+End a buffer flush batch for LRU or flush list */
+	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
+	enum buf_flush	flush_type)	/*!< in: BUF_FLUSH_LRU
+					or BUF_FLUSH_LIST */

+	buf_pool_mutex_enter(buf_pool);

+	buf_pool->init_flush[flush_type] = FALSE;

+	if (buf_pool->n_flush[flush_type] == 0) {

+		/* The running flush batch has ended */

+		os_event_set(buf_pool->no_flush[flush_type]);

+	buf_pool_mutex_exit(buf_pool);
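/* buf_flush_start() and buf_flush_end() factor out the batch bookkeeping
that the old buf_flush_batch() did inline: start refuses to run when a
batch of the same type is already active in this instance, and end
signals the no_flush event that buf_flush_wait_batch_end() sleeps on once
no flush of that type remains in progress. */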
 /******************************************************************//**

 buf_flush_wait_batch_end(
 /*=====================*/
-	enum buf_flush	type)	/*!< in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */

-	ut_ad((type == BUF_FLUSH_LRU) || (type == BUF_FLUSH_LIST));

-	os_event_wait(buf_pool->no_flush[type]);
+	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
+	enum buf_flush	type)		/*!< in: BUF_FLUSH_LRU
+					or BUF_FLUSH_LIST */

+	ut_ad(type == BUF_FLUSH_LRU || type == BUF_FLUSH_LIST);

+	if (buf_pool == NULL) {
+		for (i = 0; i < srv_buf_pool_instances; ++i) {
+			buf_pool_t*	buf_pool;

+			buf_pool = buf_pool_from_array(i);

+			os_event_wait(buf_pool->no_flush[type]);

+		os_event_wait(buf_pool->no_flush[type]);
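/* Passing buf_pool == NULL makes the wait cover every buffer pool
instance; otherwise only the no_flush event of the given instance is
waited on. */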
+/*******************************************************************//**
+This utility flushes dirty blocks from the end of the LRU list.
+NOTE: The calling thread may own latches to pages: to avoid deadlocks,
+this function must be written so that it cannot end up waiting for these
+@return number of blocks for which the write request was queued;
+ULINT_UNDEFINED if there was a flush of the same type already running */
+	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
+	ulint		min_n)		/*!< in: wished minimum number of blocks
+					flushed (it is not guaranteed that the
+					actual number is that big, though) */

+	if (!buf_flush_start(buf_pool, BUF_FLUSH_LRU)) {
+		return(ULINT_UNDEFINED);

+	page_count = buf_flush_batch(buf_pool, BUF_FLUSH_LRU, min_n, 0);

+	buf_flush_end(buf_pool, BUF_FLUSH_LRU);

+	buf_flush_common(BUF_FLUSH_LRU, page_count);
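/* This is the per-instance wrapper: it starts the batch, runs
buf_flush_batch() with BUF_FLUSH_LRU, ends the batch and then lets
buf_flush_common() record the aggregated statistics. */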
+/*******************************************************************//**
+This utility flushes dirty blocks from the end of the flush list of
+all buffer pool instances.
+NOTE: The calling thread is not allowed to own any latches on pages!
+@return number of blocks for which the write request was queued;
+ULINT_UNDEFINED if there was a flush of the same type already running */
+	ulint		min_n,		/*!< in: wished minimum number of blocks
+					flushed (it is not guaranteed that the
+					actual number is that big, though) */
+	ib_uint64_t	lsn_limit)	/*!< in: in the case of BUF_FLUSH_LIST
+					all blocks whose oldest_modification is
+					smaller than this should be flushed
+					(if their number does not exceed
+					min_n), otherwise ignored */

+	ulint		total_page_count = 0;
+	ibool		skipped = FALSE;

+	if (min_n != ULINT_MAX) {
+		/* Ensure that flushing is spread evenly amongst the
+		buffer pool instances. When min_n is ULINT_MAX
+		we need to flush everything up to the lsn limit
+		so no limit here. */
+		min_n = (min_n + srv_buf_pool_instances - 1)
+			 / srv_buf_pool_instances;
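/* For example, with srv_buf_pool_instances == 8 a request for
min_n == 1000 pages becomes (1000 + 8 - 1) / 8 == 125 pages per
instance; the "+ srv_buf_pool_instances - 1" term makes the integer
division round up, so the per-instance quotas always add up to at
least the requested min_n. */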
+	/* Flush to lsn_limit in all buffer pool instances */
+	for (i = 0; i < srv_buf_pool_instances; i++) {
+		buf_pool_t*	buf_pool;
+		ulint		page_count = 0;

+		buf_pool = buf_pool_from_array(i);

+		if (!buf_flush_start(buf_pool, BUF_FLUSH_LIST)) {
+			/* We have two choices here. If lsn_limit was
+			specified then skipping an instance of buffer
+			pool means we cannot guarantee that all pages
+			up to lsn_limit have been flushed. We can
+			return right now with failure or we can try
+			to flush remaining buffer pools up to the
+			lsn_limit. We attempt to flush other buffer
+			pools based on the assumption that it will
+			help in the retry which will follow the

+		page_count = buf_flush_batch(
+			buf_pool, BUF_FLUSH_LIST, min_n, lsn_limit);

+		buf_flush_end(buf_pool, BUF_FLUSH_LIST);

+		buf_flush_common(BUF_FLUSH_LIST, page_count);

+		total_page_count += page_count;

+	return(lsn_limit != IB_ULONGLONG_MAX && skipped
+	       ? ULINT_UNDEFINED : total_page_count);
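/* ULINT_UNDEFINED is therefore only reported when a real lsn_limit was
given and at least one instance had to be skipped; otherwise the
aggregated page count is returned. Below is a minimal usage sketch, not
part of this patch: the wrapper name example_flush_up_to and its
target_lsn parameter are hypothetical, and it assumes the multi-instance
flush function shown above is the one named buf_flush_list(). */
#if 0	/* illustration only */
static
void
example_flush_up_to(
/*================*/
	ib_uint64_t	target_lsn)	/*!< in: flush up to this LSN */
{
	ulint	n_flushed;

	/* Ask every buffer pool instance to flush all pages whose
	oldest_modification is below target_lsn. */
	n_flushed = buf_flush_list(ULINT_MAX, target_lsn);

	if (n_flushed == ULINT_UNDEFINED) {
		/* Some instance already had a flush list batch running;
		wait for the batches in all instances to end and let the
		caller retry. */
		buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST);
	}
}
#endif /* illustration only */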
 /******************************************************************//**
 Gives a recommendation of how many blocks should be flushed to establish
 a big enough margin of replaceable blocks near the end of the LRU list

 immediately, without waiting. */
-buf_flush_free_margin(void)
-/*=======================*/
+buf_flush_free_margin(
+/*==================*/
+	buf_pool_t*	buf_pool)	/*!< in: Buffer pool instance */

 	ulint	n_to_flush;

-	n_to_flush = buf_flush_LRU_recommendation();
+	n_to_flush = buf_flush_LRU_recommendation(buf_pool);

 	if (n_to_flush > 0) {
-		n_flushed = buf_flush_batch(BUF_FLUSH_LRU, n_to_flush, 0);
+		n_flushed = buf_flush_LRU(buf_pool, n_to_flush);

 		if (n_flushed == ULINT_UNDEFINED) {
 			/* There was an LRU type flush batch already running;
 			let us wait for it to end */
-			buf_flush_wait_batch_end(BUF_FLUSH_LRU);
+			buf_flush_wait_batch_end(buf_pool, BUF_FLUSH_LRU);

+/*********************************************************************//**
+Flushes pages from the end of all the LRU lists. */
+buf_flush_free_margins(void)
+/*========================*/

+	for (i = 0; i < srv_buf_pool_instances; i++) {
+		buf_pool_t*	buf_pool;

+		buf_pool = buf_pool_from_array(i);

+		buf_flush_free_margin(buf_pool);

 /*********************************************************************
 Update the historical stats that we are collecting for flush rate
 heuristics at the end of each interval.