172
172
holding file pages that have been modified in the memory
173
173
but not written to disk yet. The block with the oldest modification
174
174
which has not yet been written to disk is at the end of the chain.
175
The access to this list is protected by flush_list_mutex.
175
The access to this list is protected by buf_pool->flush_list_mutex.
177
177
The chain of unmodified compressed blocks (buf_pool->zip_clean)
178
178
contains the control blocks (buf_page_t) of those compressed pages
246
246
/** Number of attempts made to read in a page in the buffer pool */
247
247
static const ulint BUF_PAGE_READ_MAX_RETRIES = 100;
249
/** The buffer buf_pool of the database */
250
UNIV_INTERN buf_pool_t* buf_pool_ptr[MAX_BUFFER_POOLS];
249
/** The buffer pools of the database */
250
UNIV_INTERN buf_pool_t* buf_pool_ptr;
252
252
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
253
253
static ulint buf_dbg_counter = 0; /*!< This is used to insert validation
859
859
block->frame = frame;
861
block->page.buf_pool = buf_pool;
861
block->page.buf_pool_index = buf_pool_index(buf_pool);
862
862
block->page.state = BUF_BLOCK_NOT_USED;
863
863
block->page.buf_fix_count = 0;
864
864
block->page.io_fix = BUF_IO_NONE;
1294
1292
ulint total_size, /*!< in: size of the total pool in bytes */
1295
1293
ulint n_instances) /*!< in: number of instances */
1296
const ulint size = total_size / n_instances;
1298
ut_ad(n_instances > 0);
1299
ut_ad(n_instances <= MAX_BUFFER_POOLS);
1300
ut_ad(n_instances == srv_buf_pool_instances);
1299
1302
/* We create an extra buffer pool instance, this instance is used
1300
1303
for flushing the flush lists, to keep track of n_flush for all
1301
1304
the buffer pools and also used as a waiting object during flushing. */
1305
buf_pool_ptr = mem_zalloc(n_instances * sizeof *buf_pool_ptr);
1302
1307
for (i = 0; i < n_instances; i++) {
1306
ptr = mem_zalloc(sizeof(*ptr));
1308
size = total_size / n_instances;
1310
buf_pool_ptr[i] = ptr;
1308
buf_pool_t* ptr = &buf_pool_ptr[i];
1312
1310
if (buf_pool_init_instance(ptr, size, i) != DB_SUCCESS) {
1314
mem_free(buf_pool_ptr[i]);
1316
1312
/* Free all the instances created so far. */
1317
1313
buf_pool_free(i);
1882
1880
ut_ad(!bpage->in_page_hash);
1883
1881
ut_ad(bpage->buf_fix_count == 0);
1885
/* bpage is pointing to buf_pool_watch[],
1886
which is protected by buf_pool_mutex.
1883
/* bpage is pointing to buf_pool->watch[],
1884
which is protected by buf_pool->mutex.
1887
1885
Normally, buf_page_t objects are protected by
1888
1886
buf_block_t::mutex or buf_pool->zip_mutex or both. */
3009
3007
UNIV_MEM_ASSERT_RW(&block->page, sizeof block->page);
3009
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
3010
if ((mode == BUF_GET_IF_IN_POOL || mode == BUF_GET_IF_IN_POOL_OR_WATCH)
3012
/* Try to evict the block from the buffer pool, to use the
3013
insert buffer (change buffer) as much as possible. */
3015
if (buf_LRU_free_block(&block->page, TRUE, NULL)
3017
mutex_exit(&block->mutex);
3018
if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
3019
/* Set the watch, as it would have
3020
been set if the page were not in the
3021
buffer pool in the first place. */
3022
block = (buf_block_t*) buf_pool_watch_set(
3023
space, offset, fold);
3025
if (UNIV_LIKELY_NULL(block)) {
3027
/* The page entered the buffer
3028
pool for some reason. Try to
3033
buf_pool_mutex_exit(buf_pool);
3035
"innodb_change_buffering_debug evict %u %u\n",
3036
(unsigned) space, (unsigned) offset);
3038
} else if (buf_flush_page_try(buf_pool, block)) {
3040
"innodb_change_buffering_debug flush %u %u\n",
3041
(unsigned) space, (unsigned) offset);
3046
/* Failed to evict the page; change it directly */
3048
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
3012
3050
buf_block_buf_fix_inc(block, file, line);
3645
3683
bpage = buf_buddy_alloc(buf_pool, sizeof *bpage, &lru);
3647
3685
/* Initialize the buf_pool pointer. */
3648
bpage->buf_pool = buf_pool;
3686
bpage->buf_pool_index = buf_pool_index(buf_pool);
3650
3688
/* If buf_buddy_alloc() allocated storage from the LRU list,
3651
3689
it released and reacquired buf_pool->mutex. Thus, we must