the read requests for the whole area.

#ifndef UNIV_HOTBACKUP
/** Value in microseconds */
static const int WAIT_FOR_READ = 5000;
/** Number of attempts made to read in a page in the buffer pool */
static const ulint BUF_PAGE_READ_MAX_RETRIES = 100;
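
/* Illustrative sketch, not part of the original file: how a caller such
as buf_page_get_gen() might use the two constants above, sleeping
WAIT_FOR_READ microseconds between polls and giving up after
BUF_PAGE_READ_MAX_RETRIES attempts. my_page_is_readable() is a
hypothetical helper. */
#if 0
ulint	retries = 0;

while (!my_page_is_readable(space, offset)) {
	if (++retries == BUF_PAGE_READ_MAX_RETRIES) {
		fprintf(stderr, "InnoDB: unable to read page\n");
		ut_error;
	}

	os_thread_sleep(WAIT_FOR_READ);
}
#endif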
/** The buffer pools of the database */
UNIV_INTERN buf_pool_t* buf_pool_ptr;
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
static ulint buf_dbg_counter = 0; /*!< This is used to insert validation
operations in execution in the
debug version */
/** Flag to forbid the release of the buffer pool mutex.
Protected by buf_pool->mutex. */
UNIV_INTERN ulint buf_pool_mutex_exit_forbidden = 0;
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
#ifdef UNIV_DEBUG
/** If this is set TRUE, the program prints info whenever
read-ahead or flush occurs */
UNIV_INTERN ibool buf_debug_prints = FALSE;
#endif /* UNIV_DEBUG */
#ifdef UNIV_PFS_RWLOCK
/* Keys to register buffer block related rwlocks and mutexes with
performance schema */
UNIV_INTERN mysql_pfs_key_t buf_block_lock_key;
# ifdef UNIV_SYNC_DEBUG
UNIV_INTERN mysql_pfs_key_t buf_block_debug_latch_key;
# endif /* UNIV_SYNC_DEBUG */
#endif /* UNIV_PFS_RWLOCK */

#ifdef UNIV_PFS_MUTEX
UNIV_INTERN mysql_pfs_key_t buffer_block_mutex_key;
UNIV_INTERN mysql_pfs_key_t buf_pool_mutex_key;
UNIV_INTERN mysql_pfs_key_t buf_pool_zip_mutex_key;
UNIV_INTERN mysql_pfs_key_t flush_list_mutex_key;
#endif /* UNIV_PFS_MUTEX */

#if defined UNIV_PFS_MUTEX || defined UNIV_PFS_RWLOCK
# ifndef PFS_SKIP_BUFFER_MUTEX_RWLOCK

/* Buffer block mutexes and rwlocks can be registered
in one group rather than individually. If PFS_GROUP_BUFFER_SYNC
is defined, register buffer block mutex and rwlock
in one group after their initialization. */
# define PFS_GROUP_BUFFER_SYNC

/* This define caps the number of mutexes/rwlocks that can
be registered with performance schema. Developers can
modify this define if necessary. Please note, this would
be effective only if PFS_GROUP_BUFFER_SYNC is defined. */
# define PFS_MAX_BUFFER_MUTEX_LOCK_REGISTER ULINT_MAX

# endif /* !PFS_SKIP_BUFFER_MUTEX_RWLOCK */
#endif /* UNIV_PFS_MUTEX || UNIV_PFS_RWLOCK */
/** A chunk of buffers. The buffer pool is allocated in chunks. */
struct buf_chunk_struct{
ulint mem_size; /*!< allocated size of the chunk */
ulint size; /*!< size of frames[] and blocks[] */
void* mem; /*!< pointer to the memory area which
was allocated for the frames */
buf_block_t* blocks; /*!< array of buffer control blocks */
};
#endif /* !UNIV_HOTBACKUP */
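
/* Illustrative sketch, not part of the original file: a buffer pool
instance owns an array of n_chunks such chunks, and its frame count is
the sum of the chunk sizes. The helper name below is hypothetical. */
#if 0
static ulint
buf_pool_total_frames(
/*==================*/
	const buf_pool_t*	buf_pool)
{
	const buf_chunk_t*		chunk = buf_pool->chunks;
	const buf_chunk_t* const	echunk = chunk + buf_pool->n_chunks;
	ulint				n_frames = 0;

	while (chunk < echunk) {
		n_frames += chunk->size;
		chunk++;
	}

	return(n_frames);
}
#endif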
/********************************************************************//**
Gets the smallest oldest_modification lsn for any page in the pool. Returns
zero if all modified pages have been flushed to disk.
@return oldest modification in pool, zero if none */
buf_pool_get_oldest_modification(void)
/*==================================*/
ib_uint64_t oldest_lsn = 0;

/* When we traverse all the flush lists we don't want another
thread to add a dirty page to any flush list. */
log_flush_order_mutex_enter();

for (i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_t* buf_pool;

buf_pool = buf_pool_from_array(i);

buf_flush_list_mutex_enter(buf_pool);

bpage = UT_LIST_GET_LAST(buf_pool->flush_list);

ut_ad(bpage->in_flush_list);
lsn = bpage->oldest_modification;

buf_flush_list_mutex_exit(buf_pool);

if (!oldest_lsn || oldest_lsn > lsn) {

log_flush_order_mutex_exit();

/* The returned answer may be out of date: the flush_list can
change after the mutex has been released. */
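
/* Illustrative sketch, not part of the original file: the typical
consumer of this value is the checkpointing code, which must not
advance the checkpoint LSN past the oldest unflushed modification.
The function name below is hypothetical. */
#if 0
void
log_checkpoint_margin_sketch(void)
{
	ib_uint64_t	oldest_lsn = buf_pool_get_oldest_modification();

	if (!oldest_lsn) {
		/* No dirty pages: a checkpoint could be taken at the
		current end of the log. */
		return;
	}

	/* Otherwise the checkpoint LSN must not exceed oldest_lsn,
	because redo before it may still be needed for recovery. */
}
#endif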
/********************************************************************//**
Get total buffer pool statistics. */
buf_get_total_list_len(
/*===================*/
ulint* LRU_len, /*!< out: length of all LRU lists */
ulint* free_len, /*!< out: length of all free lists */
ulint* flush_list_len) /*!< out: length of all flush lists */

for (i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_t* buf_pool;

buf_pool = buf_pool_from_array(i);
*LRU_len += UT_LIST_GET_LEN(buf_pool->LRU);
*free_len += UT_LIST_GET_LEN(buf_pool->free);
*flush_list_len += UT_LIST_GET_LEN(buf_pool->flush_list);
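
/* Illustrative usage sketch, not part of the original file: the server
monitor output aggregates the three list lengths over all instances
with a single call. The counters are zero-initialized here because the
function's own initialization lines are elided above. */
#if 0
ulint	lru_len = 0;
ulint	free_len = 0;
ulint	flush_list_len = 0;

buf_get_total_list_len(&lru_len, &free_len, &flush_list_len);
#endif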
/********************************************************************//**
Get total buffer pool statistics. */
buf_pool_stat_t* tot_stat) /*!< out: buffer pool stats */

memset(tot_stat, 0, sizeof(*tot_stat));

for (i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_stat_t* buf_stat;
buf_pool_t* buf_pool;

buf_pool = buf_pool_from_array(i);

buf_stat = &buf_pool->stat;
tot_stat->n_page_gets += buf_stat->n_page_gets;
tot_stat->n_pages_read += buf_stat->n_pages_read;
tot_stat->n_pages_written += buf_stat->n_pages_written;
tot_stat->n_pages_created += buf_stat->n_pages_created;
tot_stat->n_ra_pages_read += buf_stat->n_ra_pages_read;
tot_stat->n_ra_pages_evicted += buf_stat->n_ra_pages_evicted;
tot_stat->n_pages_made_young += buf_stat->n_pages_made_young;

tot_stat->n_pages_not_made_young +=
buf_stat->n_pages_not_made_young;
/********************************************************************//**
Allocates a buffer block.
@return own: the allocated block, in state BUF_BLOCK_MEMORY */
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
ulint zip_size) /*!< in: compressed page size in bytes,
or 0 if uncompressed tablespace */

static ulint buf_pool_index;

if (buf_pool == NULL) {
/* We are allocating memory from any buffer pool, ensure
we spread the load across all buffer pool instances. */
index = buf_pool_index++ % srv_buf_pool_instances;
buf_pool = buf_pool_from_array(index);

block = buf_LRU_get_free_block(buf_pool, zip_size);

buf_block_set_state(block, BUF_BLOCK_MEMORY);
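
/* Illustrative note, not part of the original file: with the static
round-robin counter above, successive calls with buf_pool == NULL cycle
through instances 0, 1, ..., srv_buf_pool_instances - 1, 0, ... */
#if 0
buf_block_t*	block = buf_block_alloc(NULL, 0);	/* any instance */

/* ... use block->frame as scratch memory ... */

buf_block_free(block);
#endif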
/********************************************************************//**
Calculates a page checksum which is stored to the page when it is written
to a file. Note that we must be careful to calculate the same value on
32-bit and 64-bit architectures.
@return checksum */
buf_calc_page_new_checksum(
/*=======================*/
const byte* page) /*!< in: buffer page */
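
/* The function body is elided in this extract. In the InnoDB source it
folds two byte ranges of the page, skipping the fields that are written
outside the buffer pool (FIL_PAGE_FILE_FLUSH_LSN and the trailing old
checksum), and truncates the fold to 32 bits so that 32-bit and 64-bit
builds agree. A sketch of that implementation, to be verified against
buf0buf.c: */
#if 0
ulint	checksum;

checksum = ut_fold_binary(page + FIL_PAGE_OFFSET,
			  FIL_PAGE_FILE_FLUSH_LSN - FIL_PAGE_OFFSET)
	+ ut_fold_binary(page + FIL_PAGE_DATA,
			 UNIV_PAGE_SIZE - FIL_PAGE_DATA
			 - FIL_PAGE_END_LSN_OLD_CHKSUM);
checksum = checksum & 0xFFFFFFFFUL;

return(checksum);
#endif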
os_mem_free_large(chunk->mem, chunk->mem_size);
/********************************************************************//**
Set buffer pool size variables after resizing it */
buf_pool_set_sizes(void)
/*====================*/
ulint curr_size = 0;

buf_pool_mutex_enter_all();

for (i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_t* buf_pool;

buf_pool = buf_pool_from_array(i);
curr_size += buf_pool->curr_pool_size;

srv_buf_pool_curr_size = curr_size;
srv_buf_pool_old_size = srv_buf_pool_size;

buf_pool_mutex_exit_all();
/********************************************************************//**
Initialize a buffer pool instance.
@return DB_SUCCESS if all goes well. */
buf_pool_init_instance(
/*===================*/
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
ulint buf_pool_size, /*!< in: size in bytes */
ulint instance_no) /*!< in: id of the instance */

/* 1. Initialize general fields
------------------------------- */
mutex_create(buf_pool_mutex_key,
&buf_pool->mutex, SYNC_BUF_POOL);
mutex_create(buf_pool_zip_mutex_key,
&buf_pool->zip_mutex, SYNC_BUF_BLOCK);

buf_pool_mutex_enter(buf_pool);

if (buf_pool_size > 0) {
buf_pool->n_chunks = 1;
void* chunk_ptr = mem_zalloc(sizeof *chunk);
buf_pool->chunks = chunk = static_cast<buf_chunk_t *>(chunk_ptr);

UT_LIST_INIT(buf_pool->free);

if (!buf_chunk_init(buf_pool, chunk, buf_pool_size)) {

buf_pool_mutex_exit(buf_pool);

buf_pool->instance_no = instance_no;
buf_pool->old_pool_size = buf_pool_size;
buf_pool->curr_size = chunk->size;
buf_pool->curr_pool_size = buf_pool->curr_size * UNIV_PAGE_SIZE;

buf_pool->page_hash = hash_create(2 * buf_pool->curr_size);
buf_pool->zip_hash = hash_create(2 * buf_pool->curr_size);

buf_pool->last_printout_time = ut_time();
/* 2. Initialize flushing fields
-------------------------------- */

mutex_create(flush_list_mutex_key, &buf_pool->flush_list_mutex,
SYNC_BUF_FLUSH_LIST);

for (i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; i++) {
buf_pool->no_flush[i] = os_event_create(NULL);

/* 3. Initialize LRU fields
--------------------------- */

/* All fields are initialized by mem_zalloc(). */

buf_pool_mutex_exit(buf_pool);
/********************************************************************//**
Free one buffer pool instance */
buf_pool_free_instance(
/*===================*/
buf_pool_t* buf_pool) /*!< in,own: buffer pool instance
to free */

buf_chunk_t* chunk;
buf_chunk_t* chunks;

os_mem_free_large(chunk->mem, chunk->mem_size);

mem_free(buf_pool->chunks);
hash_table_free(buf_pool->page_hash);
hash_table_free(buf_pool->zip_hash);
/********************************************************************//**
Creates the buffer pool.
@return DB_SUCCESS if success, DB_ERROR if not enough memory or error */
ulint total_size, /*!< in: size of the total pool in bytes */
ulint n_instances) /*!< in: number of instances */

const ulint size = total_size / n_instances;

ut_ad(n_instances > 0);
ut_ad(n_instances <= MAX_BUFFER_POOLS);
ut_ad(n_instances == srv_buf_pool_instances);

/* We create an extra buffer pool instance, this instance is used
for flushing the flush lists, to keep track of n_flush for all
the buffer pools and also used as a waiting object during flushing. */
void* buf_pool_void_ptr = mem_zalloc(n_instances * sizeof *buf_pool_ptr);
buf_pool_ptr = static_cast<buf_pool_struct *>(buf_pool_void_ptr);

for (i = 0; i < n_instances; i++) {
buf_pool_t* ptr = &buf_pool_ptr[i];

if (buf_pool_init_instance(ptr, size, i) != DB_SUCCESS) {

/* Free all the instances created so far. */

buf_pool_set_sizes();
buf_LRU_old_ratio_update(100 * 3 / 8, FALSE);

btr_search_sys_create(buf_pool_get_curr_size() / sizeof(void*) / 64);
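
/* Illustrative usage sketch, not part of the original file: at startup
the pool is created once from the configured totals; at shutdown it is
freed only after all other subsystems have released their latches. */
#if 0
if (buf_pool_init(srv_buf_pool_size, srv_buf_pool_instances)
    != DB_SUCCESS) {
	/* Fatal: could not allocate the buffer pool. */
}

/* ... server runs ... */

buf_pool_free(srv_buf_pool_instances);
#endif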
/********************************************************************//**
Frees the buffer pool at shutdown. This must not be invoked before
freeing all mutexes. */
ulint n_instances) /*!< in: number of instances to free */

for (i = 0; i < n_instances; i++) {
buf_pool_free_instance(buf_pool_from_array(i));

mem_free(buf_pool_ptr);
buf_pool_ptr = NULL;
/********************************************************************//**
Drops adaptive hash index for a buffer pool instance. */
buf_pool_drop_hash_index_instance(
/*==============================*/
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
ibool* released_search_latch) /*!< out: flag for signalling
whether the search latch was
released */

buf_chunk_t* chunks = buf_pool->chunks;
buf_chunk_t* chunk = chunks + buf_pool->n_chunks;

while (--chunk >= chunks) {

buf_block_t* block = chunk->blocks;

for (i = chunk->size; i--; block++) {

/* block->is_hashed cannot be modified
when we have an x-latch on btr_search_latch;
see the comment in buf0buf.h */

if (!block->is_hashed) {

/* To follow the latching order, we
have to release btr_search_latch
before acquiring block->latch. */
rw_lock_x_unlock(&btr_search_latch);
/* When we release the search latch,
we must rescan all blocks, because
some may become hashed again. */
*released_search_latch = TRUE;

rw_lock_x_lock(&block->lock);

/* This should be guaranteed by the
callers, which will be holding
btr_search_enabled_mutex. */
ut_ad(!btr_search_enabled);

/* Because we did not buffer-fix the
block by calling buf_block_get_gen(),
it is possible that the block has been
allocated for some other use after
btr_search_latch was released above.
We do not care which file page the
block is mapped to. All we want to do
is to drop any hash entries referring
to the page. */

/* It is possible that
block->page.state != BUF_FILE_PAGE.
Even that does not matter, because
btr_search_drop_page_hash_index() will
check block->is_hashed before doing
anything. block->is_hashed can only
be set on uncompressed file pages. */

btr_search_drop_page_hash_index(block);

rw_lock_x_unlock(&block->lock);

rw_lock_x_lock(&btr_search_latch);

ut_ad(!btr_search_enabled);
/********************************************************************//**
Drops the adaptive hash index. To prevent a livelock, this function
is only to be called while holding btr_search_latch and while
btr_search_enabled == FALSE. */
buf_pool_drop_hash_index(void)
/*==========================*/
ibool released_search_latch;

#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(!btr_search_enabled);

do {
released_search_latch = FALSE;

for (i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_t* buf_pool;

buf_pool = buf_pool_from_array(i);

buf_pool_drop_hash_index_instance(
buf_pool, &released_search_latch);

} while (released_search_latch);
/********************************************************************//**
Relocate a buffer control block. Relocates the block on the LRU list
and in buf_pool->page_hash. Does not relocate bpage->list.
The caller must take care of relocating bpage->list. */

buf_flush_list_mutex_exit(buf_pool);
buf_pool_mutex_exit(buf_pool);
/********************************************************************//**
Determine if a block is a sentinel for a buffer pool watch.
@return TRUE if a sentinel for a buffer pool watch, FALSE if not */
buf_pool_watch_is_sentinel(
/*=======================*/
buf_pool_t* buf_pool, /*!< buffer pool instance */
const buf_page_t* bpage) /*!< in: block */

ut_ad(buf_page_in_file(bpage));

if (bpage < &buf_pool->watch[0]
|| bpage >= &buf_pool->watch[BUF_POOL_WATCH_SIZE]) {

ut_ad(buf_page_get_state(bpage) != BUF_BLOCK_ZIP_PAGE
|| bpage->zip.data != NULL);

ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE);
ut_ad(!bpage->in_zip_hash);
ut_ad(bpage->in_page_hash);
ut_ad(bpage->zip.data == NULL);
ut_ad(bpage->buf_fix_count > 0);
/****************************************************************//**
Add watch for the given page to be read in. Caller must have the buffer pool
mutex reserved.
@return NULL if watch set, block if the page is in the buffer pool */
ulint space, /*!< in: space id */
ulint offset, /*!< in: page number */
ulint fold) /*!< in: buf_page_address_fold(space, offset) */

buf_pool_t* buf_pool = buf_pool_get(space, offset);

ut_ad(buf_pool_mutex_own(buf_pool));

bpage = buf_page_hash_get_low(buf_pool, space, offset, fold);

if (UNIV_LIKELY_NULL(bpage)) {
if (!buf_pool_watch_is_sentinel(buf_pool, bpage)) {
/* The page was loaded meanwhile. */

/* Add to an existing watch. */
bpage->buf_fix_count++;

for (i = 0; i < BUF_POOL_WATCH_SIZE; i++) {
bpage = &buf_pool->watch[i];

ut_ad(bpage->access_time == 0);
ut_ad(bpage->newest_modification == 0);
ut_ad(bpage->oldest_modification == 0);
ut_ad(bpage->zip.data == NULL);
ut_ad(!bpage->in_zip_hash);

switch (bpage->state) {
case BUF_BLOCK_POOL_WATCH:
ut_ad(!bpage->in_page_hash);
ut_ad(bpage->buf_fix_count == 0);

/* bpage is pointing to buf_pool->watch[],
which is protected by buf_pool->mutex.
Normally, buf_page_t objects are protected by
buf_block_t::mutex or buf_pool->zip_mutex or both. */

bpage->state = BUF_BLOCK_ZIP_PAGE;
bpage->space = space;
bpage->offset = offset;
bpage->buf_fix_count = 1;

ut_d(bpage->in_page_hash = TRUE);
HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,

case BUF_BLOCK_ZIP_PAGE:
ut_ad(bpage->in_page_hash);
ut_ad(bpage->buf_fix_count > 0);

/* Allocation failed. Either the maximum number of purge
threads should never exceed BUF_POOL_WATCH_SIZE, or this code
should be modified to return a special non-NULL value and the
caller should purge the record directly. */

/* Fix compiler warning */
/********************************************************************//**
Rebuild buf_pool->page_hash. */
buf_pool_page_hash_rebuild(void)
/*============================*/
for (i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_page_hash_rebuild_instance(buf_pool_from_array(i));
/********************************************************************//**
Increase the buffer pool size of one buffer pool instance. */
buf_pool_increase_instance(
/*=======================*/
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
ulint change_size) /*!< in: new size of the pool */

buf_chunk_t* chunks;

buf_pool_mutex_enter(buf_pool);
chunks = static_cast<buf_chunk_t *>(
mem_alloc((buf_pool->n_chunks + 1) * sizeof *chunks));

memcpy(chunks, buf_pool->chunks, buf_pool->n_chunks * sizeof *chunks);

chunk = &chunks[buf_pool->n_chunks];

if (!buf_chunk_init(buf_pool, chunk, change_size)) {

buf_pool->old_pool_size = buf_pool->curr_pool_size;
buf_pool->curr_size += chunk->size;
buf_pool->curr_pool_size = buf_pool->curr_size * UNIV_PAGE_SIZE;
mem_free(buf_pool->chunks);
buf_pool->chunks = chunks;
buf_pool->n_chunks++;

buf_pool_mutex_exit(buf_pool);
/********************************************************************//**
Increase the buffer pool size. */
for (i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_increase_instance(
buf_pool_from_array(i),
change_size / srv_buf_pool_instances);

buf_pool_set_sizes();
/********************************************************************//**
Resizes the buffer pool. */
buf_pool_resize(void)
/*=================*/
ulint min_change_size = 1048576 * srv_buf_pool_instances;

buf_pool_mutex_enter_all();

if (srv_buf_pool_old_size == srv_buf_pool_size) {

buf_pool_mutex_exit_all();

} else if (srv_buf_pool_curr_size + min_change_size
> srv_buf_pool_size) {

change_size = (srv_buf_pool_curr_size - srv_buf_pool_size)

buf_pool_mutex_exit_all();

/* Disable adaptive hash indexes and empty the index
in order to free up memory in the buffer pool chunks. */
buf_pool_shrink(change_size);

} else if (srv_buf_pool_curr_size + min_change_size
< srv_buf_pool_size) {

/* Enlarge the buffer pool by at least one megabyte */

change_size = srv_buf_pool_size - srv_buf_pool_curr_size;

buf_pool_mutex_exit_all();

buf_pool_increase(change_size);

srv_buf_pool_size = srv_buf_pool_old_size;

buf_pool_mutex_exit_all();

buf_pool_page_hash_rebuild();
/****************************************************************//**
Remove the sentinel block for the watch before replacing it with a real block.
buf_page_watch_clear() or buf_page_watch_occurred() will notice that
the block has been replaced with the real block.
@return reference count, to be added to the replacement block */
buf_pool_watch_remove(
/*==================*/
buf_pool_t* buf_pool, /*!< buffer pool instance */
ulint fold, /*!< in: buf_page_address_fold(
space, offset) */
buf_page_t* watch) /*!< in/out: sentinel for watch */

ut_ad(buf_pool_mutex_own(buf_pool));

HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, watch);
ut_d(watch->in_page_hash = FALSE);
watch->buf_fix_count = 0;
watch->state = BUF_BLOCK_POOL_WATCH;
/****************************************************************//**
Stop watching if the page has been read in.
buf_pool_watch_set(space,offset) must have returned NULL before. */
buf_pool_watch_unset(
/*=================*/
ulint space, /*!< in: space id */
ulint offset) /*!< in: page number */

buf_pool_t* buf_pool = buf_pool_get(space, offset);
ulint fold = buf_page_address_fold(space, offset);

buf_pool_mutex_enter(buf_pool);
bpage = buf_page_hash_get_low(buf_pool, space, offset, fold);
/* The page must exist because buf_pool_watch_set()
increments buf_fix_count. */

if (UNIV_UNLIKELY(!buf_pool_watch_is_sentinel(buf_pool, bpage))) {
mutex_t* mutex = buf_page_get_mutex(bpage);

ut_a(bpage->buf_fix_count > 0);
bpage->buf_fix_count--;

ut_a(bpage->buf_fix_count > 0);

if (UNIV_LIKELY(!--bpage->buf_fix_count)) {
buf_pool_watch_remove(buf_pool, fold, bpage);

buf_pool_mutex_exit(buf_pool);
/****************************************************************//**
Check if the page has been read in.
This may only be called after buf_pool_watch_set(space,offset)
has returned NULL and before invoking buf_pool_watch_unset(space,offset).
@return FALSE if the given page was not read in, TRUE if it was */
buf_pool_watch_occurred(
/*====================*/
ulint space, /*!< in: space id */
ulint offset) /*!< in: page number */

buf_pool_t* buf_pool = buf_pool_get(space, offset);
ulint fold = buf_page_address_fold(space, offset);

buf_pool_mutex_enter(buf_pool);

bpage = buf_page_hash_get_low(buf_pool, space, offset, fold);
/* The page must exist because buf_pool_watch_set()
increments buf_fix_count. */

ret = !buf_pool_watch_is_sentinel(buf_pool, bpage);
buf_pool_mutex_exit(buf_pool);
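
/* Illustrative sketch, not part of the original file: the watch
protocol as a whole. A caller (e.g. the purge thread) sets a watch
while the page is not resident, does its work, then checks whether the
page was read in meanwhile and always unsets the watch. The fold value
and the buf_pool mutex handling required by buf_pool_watch_set() are
simplified away here. */
#if 0
if (buf_pool_watch_set(space, offset, fold) == NULL) {

	/* ... perform work that is only valid while the page
	stays out of the buffer pool ... */

	if (buf_pool_watch_occurred(space, offset)) {
		/* The page was read in concurrently: the work
		above must be redone or abandoned. */
	}

	buf_pool_watch_unset(space, offset);
}
#endif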
/********************************************************************//**
Moves a page to the start of the buffer pool LRU list. This high-level
function can be used to prevent an important page from slipping out of
the buffer pool. */
buf_page_make_young(
/*================*/
buf_page_t* bpage) /*!< in: buffer block of a file page */

buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);

buf_pool_mutex_enter(buf_pool);

ut_a(buf_page_in_file(bpage));

buf_LRU_make_block_young(bpage);

buf_pool_mutex_exit(buf_pool);
/********************************************************************//**
Sets the time of the first access of a page and moves a page to the
start of the buffer pool LRU list if it is too old. This high-level
function can be used to prevent an important page from slipping
out of the buffer pool. */
buf_page_set_accessed_make_young(
/*=============================*/
buf_page_t* bpage, /*!< in/out: buffer block of a
file page */
unsigned access_time) /*!< in: bpage->access_time
read under mutex protection,
or 0 if unknown */

buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);

ut_ad(!buf_pool_mutex_own(buf_pool));
ut_a(buf_page_in_file(bpage));

if (buf_page_peek_if_too_old(bpage)) {
buf_pool_mutex_enter(buf_pool);
buf_LRU_make_block_young(bpage);
buf_pool_mutex_exit(buf_pool);
} else if (!access_time) {
ulint time_ms = ut_time_ms();
buf_pool_mutex_enter(buf_pool);
buf_page_set_accessed(bpage, time_ms);
buf_pool_mutex_exit(buf_pool);
/********************************************************************//**
Resets the check_index_page_at_flush field of a page if found in the buffer
pool. */
buf_reset_check_index_page_at_flush(
/*================================*/
ulint space, /*!< in: space id */
ulint offset) /*!< in: page number */

buf_block_t* block;
buf_pool_t* buf_pool = buf_pool_get(space, offset);

buf_pool_mutex_enter(buf_pool);

block = (buf_block_t*) buf_page_hash_get(buf_pool, space, offset);

if (block && buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE) {
ut_ad(!buf_pool_watch_is_sentinel(buf_pool, &block->page));
block->check_index_page_at_flush = FALSE;

buf_pool_mutex_exit(buf_pool);
/********************************************************************//**
Returns the current state of is_hashed of a page. FALSE if the page is
not in the pool. NOTE that this operation does not fix the page in the
pool if it is found there.
@return TRUE if page hash index is built in search system */
buf_page_peek_if_search_hashed(
/*===========================*/
ulint space, /*!< in: space id */
ulint offset) /*!< in: page number */

buf_block_t* block;
ibool is_hashed;
buf_pool_t* buf_pool = buf_pool_get(space, offset);

buf_pool_mutex_enter(buf_pool);

block = (buf_block_t*) buf_page_hash_get(buf_pool, space, offset);

if (!block || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
is_hashed = FALSE;
} else {
ut_ad(!buf_pool_watch_is_sentinel(buf_pool, &block->page));
is_hashed = block->is_hashed;
}

buf_pool_mutex_exit(buf_pool);

return(is_hashed);
#ifdef UNIV_DEBUG_FILE_ACCESSES
/********************************************************************//**
Sets file_page_was_freed TRUE if the page is found in the buffer pool.
This function should be called when we free a file page and want the
debug version to check that it is not accessed any more unless
reallocated.
@return control block if found in page hash table, otherwise NULL */
buf_page_set_file_page_was_freed(
/*=============================*/
ulint space, /*!< in: space id */
ulint offset) /*!< in: page number */

buf_page_t* bpage;
buf_pool_t* buf_pool = buf_pool_get(space, offset);

buf_pool_mutex_enter(buf_pool);

bpage = buf_page_hash_get(buf_pool, space, offset);

ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
bpage->file_page_was_freed = TRUE;

buf_pool_mutex_exit(buf_pool);
/********************************************************************//**
Sets file_page_was_freed FALSE if the page is found in the buffer pool.
This function should be called when we free a file page and want the
debug version to check that it is not accessed any more unless
reallocated.
@return control block if found in page hash table, otherwise NULL */
buf_page_reset_file_page_was_freed(
/*===============================*/
ulint space, /*!< in: space id */
ulint offset) /*!< in: page number */

buf_page_t* bpage;
buf_pool_t* buf_pool = buf_pool_get(space, offset);

buf_pool_mutex_enter(buf_pool);

bpage = buf_page_hash_get(buf_pool, space, offset);

ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
bpage->file_page_was_freed = FALSE;

buf_pool_mutex_exit(buf_pool);

#endif /* UNIV_DEBUG_FILE_ACCESSES */
/********************************************************************//**
Get read access to a compressed page (usually of type
FIL_PAGE_TYPE_ZBLOB or FIL_PAGE_TYPE_ZBLOB2).
The page must be released with buf_page_release_zip().
NOTE: the page is not protected by any latch. Mutual exclusion has to
be implemented at a higher level. In other words, all possible
accesses to a given page through this function must be protected by
the same set of mutexes or latches.
@return pointer to the block */
buf_page_get_zip(
/*=============*/
ulint space, /*!< in: space id */
ulint zip_size,/*!< in: compressed page size */
ulint offset) /*!< in: page number */

buf_page_t* bpage;
mutex_t* block_mutex;
ibool must_read;
unsigned access_time;
buf_pool_t* buf_pool = buf_pool_get(space, offset);

#ifndef UNIV_LOG_DEBUG
ut_ad(!ibuf_inside());

buf_pool->stat.n_page_gets++;

buf_pool_mutex_enter(buf_pool);

bpage = buf_page_hash_get(buf_pool, space, offset);

ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));

/* Page not in buf_pool: needs to be read from file */

buf_pool_mutex_exit(buf_pool);

buf_read_page(space, zip_size, offset);
#ifndef UNIV_HOTBACKUP
/*******************************************************************//**
Gets the block to whose frame the pointer is pointing to if found
in this buffer pool instance.
@return pointer to block */
buf_block_align_instance(
/*=====================*/
buf_pool_t* buf_pool, /*!< in: buffer in which the block
resides */
const byte* ptr) /*!< in: pointer to a frame */

/* TODO: protect buf_pool->chunks with a mutex (it will
currently remain constant after buf_pool_init()) */
for (chunk = buf_pool->chunks, i = buf_pool->n_chunks; i--; chunk++) {
lint offs = ptr - chunk->blocks->frame;

if (UNIV_UNLIKELY(offs < 0)) {

offs >>= UNIV_PAGE_SIZE_SHIFT;

if (UNIV_LIKELY((ulint) offs < chunk->size)) {
buf_block_t* block = &chunk->blocks[offs];

/* The function buf_chunk_init() invokes
buf_block_init() so that block[n].frame ==
block->frame + n * UNIV_PAGE_SIZE. Check it. */
ut_ad(block->frame == page_align(ptr));

/* A thread that updates these fields must
hold buf_pool->mutex and block->mutex. Acquire
only the latter. */
mutex_enter(&block->mutex);

switch (buf_block_get_state(block)) {
case BUF_BLOCK_ZIP_FREE:
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
/* These types should only be used in
the compressed buffer pool, whose
memory is allocated from
buf_pool->chunks, in UNIV_PAGE_SIZE
blocks flagged as BUF_BLOCK_MEMORY. */

case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_MEMORY:
/* Some data structures contain
"guess" pointers to file pages. The
file pages may have been freed and
reused. Do not complain. */

case BUF_BLOCK_REMOVE_HASH:
/* buf_LRU_block_remove_hashed_page()
will overwrite the FIL_PAGE_OFFSET and
FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID with
0xff and set the state to
BUF_BLOCK_REMOVE_HASH. */
ut_ad(page_get_space_id(page_align(ptr))

ut_ad(page_get_page_no(page_align(ptr))

case BUF_BLOCK_FILE_PAGE:
ut_ad(block->page.space
== page_get_space_id(page_align(ptr)));
ut_ad(block->page.offset
== page_get_page_no(page_align(ptr)));

mutex_exit(&block->mutex);
#endif /* UNIV_DEBUG */
/*******************************************************************//**
Gets the block to whose frame the pointer is pointing to.
@return pointer to block, never NULL */
const byte* ptr) /*!< in: pointer to a frame */

for (i = 0; i < srv_buf_pool_instances; i++) {

block = buf_block_align_instance(
buf_pool_from_array(i), ptr);

/* The block should always be found. */
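
/* Illustrative usage sketch, not part of the original file: given any
pointer into a page frame (rec_ptr below stands for e.g. a record
pointer), the owning control block can be recovered without knowing
the space id or page number. */
#if 0
buf_block_t*	block = buf_block_align(rec_ptr);

ut_ad(buf_block_get_frame(block) == page_align(rec_ptr));
#endif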
/********************************************************************//**
Find out if a pointer belongs to a buf_block_t. It can be a pointer to
the buf_block_t itself or a member of it. This function checks one of
the buffer pool instances.
@return TRUE if ptr belongs to a buf_block_t struct */
buf_pointer_is_block_field_instance(
/*================================*/
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
const void* ptr) /*!< in: pointer not dereferenced */

const buf_chunk_t* chunk = buf_pool->chunks;
const buf_chunk_t* const echunk = chunk + buf_pool->n_chunks;

/* TODO: protect buf_pool->chunks with a mutex (it will
currently remain constant after buf_pool_init()) */
while (chunk < echunk) {
if (ptr >= (void *)chunk->blocks
&& ptr < (void *)(chunk->blocks + chunk->size)) {
/********************************************************************//**
Find out if a pointer belongs to a buf_block_t. It can be a pointer to
the buf_block_t itself or a member of it.
@return TRUE if ptr belongs to a buf_block_t struct */
buf_pointer_is_block_field(
/*=======================*/
const void* ptr) /*!< in: pointer not dereferenced */

for (i = 0; i < srv_buf_pool_instances; i++) {

found = buf_pointer_is_block_field_instance(
buf_pool_from_array(i), ptr);
/********************************************************************//**
Find out if a buffer block was created by buf_chunk_init().
@return TRUE if "block" has been added to buf_pool->free by buf_chunk_init() */
buf_block_is_uncompressed(
/*======================*/
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
const buf_block_t* block) /*!< in: pointer to block,
not dereferenced */

ut_ad(buf_pool_mutex_own(buf_pool));

if (UNIV_UNLIKELY((((ulint) block) % sizeof *block) != 0)) {
/* The pointer should be aligned. */

return(buf_pointer_is_block_field_instance(buf_pool, (void *)block));
/********************************************************************//**
This is the general function used to get access to a database page.
@return pointer to the block or NULL */
buf_page_get_gen(
/*=============*/
ulint space, /*!< in: space id */
ulint zip_size,/*!< in: compressed page size in bytes
or 0 for uncompressed pages */
ulint offset, /*!< in: page number */
ulint rw_latch,/*!< in: RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH */
buf_block_t* guess, /*!< in: guessed block or NULL */
ulint mode, /*!< in: BUF_GET, BUF_GET_IF_IN_POOL,
BUF_GET_NO_LATCH, or
BUF_GET_IF_IN_POOL_OR_WATCH */
const char* file, /*!< in: file name */
ulint line, /*!< in: line where called */
mtr_t* mtr) /*!< in: mini-transaction */

buf_block_t* block;
unsigned access_time;
ulint fix_type;
ibool must_read;
buf_pool_t* buf_pool = buf_pool_get(space, offset);

ut_ad(mtr->state == MTR_ACTIVE);
ut_ad((rw_latch == RW_S_LATCH)
|| (rw_latch == RW_X_LATCH)
|| (rw_latch == RW_NO_LATCH));
ut_ad((mode != BUF_GET_NO_LATCH) || (rw_latch == RW_NO_LATCH));
ut_ad(mode == BUF_GET
|| mode == BUF_GET_IF_IN_POOL
|| mode == BUF_GET_NO_LATCH
|| mode == BUF_GET_IF_IN_POOL_OR_WATCH);
ut_ad(zip_size == fil_space_get_zip_size(space));
ut_ad(ut_is_2pow(zip_size));
#ifndef UNIV_LOG_DEBUG
ut_ad(!ibuf_inside() || ibuf_page(space, zip_size, offset, NULL));

buf_pool->stat.n_page_gets++;
fold = buf_page_address_fold(space, offset);

buf_pool_mutex_enter(buf_pool);

/* If the guess is a compressed page descriptor that
#endif /* UNIV_DEBUG */

mutex_exit(buf_page_get_mutex(bpage));
buf_pool_mutex_exit(buf_pool);
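
/* Illustrative usage sketch, not part of the original file: the usual
way to call buf_page_get_gen() is through the buf_page_get() macro,
which supplies __FILE__ and __LINE__, inside a mini-transaction that
scopes the page latch. */
#if 0
mtr_t	mtr;

mtr_start(&mtr);

block = buf_page_get(space, zip_size, offset, RW_S_LATCH, &mtr);
buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);

/* ... read from buf_block_get_frame(block) ... */

mtr_commit(&mtr);	/* releases the S-latch on the page */
#endif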
/*********************************************************************//**
Asserts that all file pages in the buffer are in a replaceable state.
@return TRUE */
buf_all_freed_instance(
/*===================*/
buf_pool_t* buf_pool) /*!< in: buffer pool instance */

buf_pool_mutex_enter(buf_pool);

chunk = buf_pool->chunks;

for (i = buf_pool->n_chunks; i--; chunk++) {

const buf_block_t* block = buf_chunk_not_freed(chunk);

if (UNIV_LIKELY_NULL(block)) {
"Page %lu %lu still fixed or dirty\n",
(ulong) block->page.space,
(ulong) block->page.offset);

buf_pool_mutex_exit(buf_pool);
/*********************************************************************//**
Invalidates file pages in one buffer pool instance */
buf_pool_invalidate_instance(
/*=========================*/
buf_pool_t* buf_pool) /*!< in: buffer pool instance */
buf_pool_mutex_enter(buf_pool);

for (i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; i++) {

/* As this function is called during startup and
during redo application phase during recovery, InnoDB
is single threaded (apart from IO helper threads) at
this stage. No new write batch can be in initialization
stage at this point. */
ut_ad(buf_pool->init_flush[i] == FALSE);

/* However, it is possible that a write batch that has
been posted earlier is still not complete. For buffer
pool invalidation to proceed we must ensure there is NO
write activity happening. */
if (buf_pool->n_flush[i] > 0) {
buf_pool_mutex_exit(buf_pool);
buf_flush_wait_batch_end(buf_pool, static_cast<buf_flush>(i));
buf_pool_mutex_enter(buf_pool);

buf_pool_mutex_exit(buf_pool);

ut_ad(buf_all_freed_instance(buf_pool));

while (freed) {
freed = buf_LRU_search_and_free_block(buf_pool, 100);

buf_pool_mutex_enter(buf_pool);

ut_ad(UT_LIST_GET_LEN(buf_pool->LRU) == 0);
ut_ad(UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0);

buf_pool->freed_page_clock = 0;
buf_pool->LRU_old = NULL;
buf_pool->LRU_old_len = 0;
buf_pool->LRU_flush_ended = 0;

memset(&buf_pool->stat, 0x00, sizeof(buf_pool->stat));
buf_refresh_io_stats(buf_pool);

buf_pool_mutex_exit(buf_pool);
/*********************************************************************//**
Invalidates the file pages in the buffer pool when an archive recovery is
completed. All the file pages buffered must be in a replaceable state when
this function is called: not latched and not modified. */
buf_pool_invalidate(void)
/*=====================*/
for (i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_invalidate_instance(buf_pool_from_array(i));
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/*********************************************************************//**
Validates data in one buffer pool instance
@return TRUE */
buf_pool_validate_instance(
/*=======================*/
buf_pool_t* buf_pool) /*!< in: buffer pool instance */

buf_chunk_t* chunk;
+ buf_pool->init_flush[BUF_FLUSH_LIST],
(ulong) buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]);

buf_flush_list_mutex_exit(buf_pool);

current_time = time(NULL);
time_elapsed = 0.001 + difftime(current_time,
buf_pool->last_printout_time);
buf_pool->last_printout_time = current_time;

"Pages made young %lu, not young %lu\n"
"%.2f youngs/s, %.2f non-youngs/s\n"
"Pages read %lu, created %lu, written %lu\n"
"%.2f reads/s, %.2f creates/s, %.2f writes/s\n",
(ulong) buf_pool->stat.n_pages_made_young,
(ulong) buf_pool->stat.n_pages_not_made_young,
(buf_pool->stat.n_pages_made_young
- buf_pool->old_stat.n_pages_made_young)
(buf_pool->stat.n_pages_not_made_young
- buf_pool->old_stat.n_pages_not_made_young)
(ulong) buf_pool->stat.n_pages_read,
(ulong) buf_pool->stat.n_pages_created,
(ulong) buf_pool->stat.n_pages_written,
(buf_pool->stat.n_pages_read
- buf_pool->old_stat.n_pages_read)
(buf_pool->stat.n_pages_created
- buf_pool->old_stat.n_pages_created)
(buf_pool->stat.n_pages_written
- buf_pool->old_stat.n_pages_written)
/ time_elapsed);

n_gets_diff = buf_pool->stat.n_page_gets
- buf_pool->old_stat.n_page_gets;

"Buffer pool hit rate %lu / 1000,"
" young-making rate %lu / 1000 not %lu / 1000\n",
(1000 - ((1000 * (buf_pool->stat.n_pages_read
- buf_pool->old_stat.n_pages_read))
/ (buf_pool->stat.n_page_gets
- buf_pool->old_stat.n_page_gets))),
(1000 * (buf_pool->stat.n_pages_made_young
- buf_pool->old_stat.n_pages_made_young)
(1000 * (buf_pool->stat.n_pages_not_made_young
- buf_pool->old_stat.n_pages_not_made_young)

fputs("No buffer pool page gets since the last printout\n",

/* Statistics about read ahead algorithm */
fprintf(file, "Pages read ahead %.2f/s,"
" evicted without access %.2f/s\n",
(buf_pool->stat.n_ra_pages_read
- buf_pool->old_stat.n_ra_pages_read)
(buf_pool->stat.n_ra_pages_evicted
- buf_pool->old_stat.n_ra_pages_evicted)

/* Print some values to help us with visualizing what is
happening with LRU eviction. */
"LRU len: %lu, unzip_LRU len: %lu\n"
"I/O sum[%lu]:cur[%lu], unzip sum[%lu]:cur[%lu]\n",
static_cast<ulint>(UT_LIST_GET_LEN(buf_pool->LRU)),
static_cast<ulint>(UT_LIST_GET_LEN(buf_pool->unzip_LRU)),
buf_LRU_stat_sum.io, buf_LRU_stat_cur.io,
buf_LRU_stat_sum.unzip, buf_LRU_stat_cur.unzip);

buf_refresh_io_stats(buf_pool);
buf_pool_mutex_exit(buf_pool);
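
/* Worked example, not part of the original file: with 10000 page gets
and 150 physical reads since the last printout, the hit rate printed
above is 1000 - (1000 * 150) / 10000 = 985 per 1000, i.e. 98.5% of
page requests were served from the pool. When no gets occurred since
the last printout, the code prints "No buffer pool page gets since the
last printout" instead of dividing by zero. */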
/*********************************************************************//**
Prints info of the buffer i/o. */
FILE* file) /*!< in/out: buffer where to print */

for (i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_t* buf_pool;

buf_pool = buf_pool_from_array(i);
buf_print_io_instance(buf_pool, file);
/**********************************************************************//**
Refreshes the statistics used to print per-second averages. */
buf_refresh_io_stats(
/*=================*/
buf_pool_t* buf_pool) /*!< in: buffer pool instance */

buf_pool->last_printout_time = ut_time();
buf_pool->old_stat = buf_pool->stat;

/**********************************************************************//**
Refreshes the statistics used to print per-second averages. */
buf_refresh_io_stats_all(void)
/*==========================*/
for (i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_t* buf_pool;

buf_pool = buf_pool_from_array(i);

buf_refresh_io_stats(buf_pool);
/**********************************************************************//**
Check if all pages in all buffer pools are in a replaceable state.
@return FALSE if not */
buf_all_freed(void)
/*===============*/
for (i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_t* buf_pool;

buf_pool = buf_pool_from_array(i);

if (!buf_all_freed_instance(buf_pool)) {
/*********************************************************************//**
Checks that there currently are no pending i/o-operations for the buffer
pool.
@return TRUE if there is no pending i/o */
buf_pool_check_no_pending_io(void)
/*==============================*/
buf_pool_mutex_enter_all();

for (i = 0; i < srv_buf_pool_instances && ret; i++) {
const buf_pool_t* buf_pool;

buf_pool = buf_pool_from_array(i);

if (buf_pool->n_pend_reads
+ buf_pool->n_flush[BUF_FLUSH_LRU]
+ buf_pool->n_flush[BUF_FLUSH_LIST]
+ buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]) {

buf_pool_mutex_exit_all();
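
/* Illustrative usage sketch, not part of the original file: shutdown
code can poll this predicate until all pending reads and flushes have
drained; the 100 ms sleep interval is an arbitrary choice for the
sketch. */
#if 0
while (!buf_pool_check_no_pending_io()) {
	os_thread_sleep(100000);	/* 100 ms */
}
#endif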
#if 0
Code currently not used
/*********************************************************************//**
Gets the current length of the free list of buffer blocks.
@return length of the free list */
buf_get_free_list_len(void)