@@ -1,7 +1,7 @@
 /*****************************************************************************
 
-Copyright (C) 1995, 2010, Innobase Oy. All Rights Reserved.
-Copyright (C) 2008, Google Inc.
+Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved.
+Copyright (c) 2008, Google Inc.
 
 Portions of this file contain modifications contributed and copyrighted by
 Google, Inc. Those modifications are gratefully acknowledged and are described

@@ -172,7 +172,7 @@
 holding file pages that have been modified in the memory
 but not written to disk yet. The block with the oldest modification
 which has not yet been written to disk is at the end of the chain.
-The access to this list is protected by buf_pool->flush_list_mutex.
+The access to this list is protected by flush_list_mutex.
 
 The chain of unmodified compressed blocks (buf_pool->zip_clean)
 contains the control blocks (buf_page_t) of those compressed pages

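The comment block above states the flush-list invariant: pages are ordered by their oldest unflushed modification, and all access goes through the list mutex. A minimal sketch of that discipline, with illustrative types rather than InnoDB's own, might look like this:

/* Pages ordered newest-first; the flusher takes the tail, i.e. the
   block with the oldest modification not yet written to disk. */
#include <pthread.h>
#include <stdint.h>
#include <stddef.h>

typedef struct page_stub {
	uint64_t          oldest_modification; /* LSN of first unflushed change */
	struct page_stub* next;                /* towards older modifications */
} page_stub;

static pthread_mutex_t flush_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static page_stub*      flush_list_head;  /* newest modification */

/* A newly dirtied page carries the highest LSN so far, so inserting
   at the head keeps the list sorted without searching. */
void flush_list_add(page_stub* page)
{
	pthread_mutex_lock(&flush_list_mutex);
	page->next = flush_list_head;
	flush_list_head = page;
	pthread_mutex_unlock(&flush_list_mutex);
}

/* The flusher walks to the tail; a real implementation would keep a
   tail pointer instead of walking. */
page_stub* flush_list_oldest(void)
{
	page_stub* page;

	pthread_mutex_lock(&flush_list_mutex);
	page = flush_list_head;
	while (page != NULL && page->next != NULL) {
		page = page->next;
	}
	pthread_mutex_unlock(&flush_list_mutex);
	return page;
}
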
@@ -246,8 +246,8 @@
 /** Number of attempts made to read in a page in the buffer pool */
 static const ulint	BUF_PAGE_READ_MAX_RETRIES = 100;
 
-/** The buffer pools of the database */
-UNIV_INTERN buf_pool_t*	buf_pool_ptr;
+/** The buffer buf_pool of the database */
+UNIV_INTERN buf_pool_t*	buf_pool_ptr[MAX_BUFFER_POOLS];
 
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 static ulint	buf_dbg_counter	= 0; /*!< This is used to insert validation

@@ -859,6 +859,6 @@
 	block->frame = frame;
 
-	block->page.buf_pool_index = buf_pool_index(buf_pool);
+	block->page.buf_pool = buf_pool;
 	block->page.state = BUF_BLOCK_NOT_USED;
 	block->page.buf_fix_count = 0;
 	block->page.io_fix = BUF_IO_NONE;

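This hunk swaps between two ways of recording which buffer pool owns a page: an index into the array of instances versus a direct owner pointer. A hypothetical sketch of the trade-off, with names that are illustrative rather than InnoDB's:

#include <stddef.h>

#define N_POOLS 8

struct pool_stub;

struct page_desc {
	unsigned          pool_index; /* index form, cf. buf_pool_index()  */
	struct pool_stub* pool;       /* pointer form, cf. page.buf_pool   */
};

static struct pool_stub* pool_array[N_POOLS];

/* Index form: a few bits per page, and still valid if the instance
   array is reallocated; costs one extra lookup to resolve. */
struct pool_stub* owner_by_index(const struct page_desc* d)
{
	return pool_array[d->pool_index];
}

/* Pointer form: resolved with a single load, but spends a full
   pointer in every page descriptor. */
struct pool_stub* owner_by_pointer(const struct page_desc* d)
{
	return d->pool;
}
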
@@ -942,12 +942,12 @@
 	/* Allocate the block descriptors from
 	the start of the memory block. */
-	chunk->blocks = static_cast<buf_block_struct *>(chunk->mem);
+	chunk->blocks = chunk->mem;
 
 	/* Align a pointer to the first frame. Note that when
 	os_large_page_size is smaller than UNIV_PAGE_SIZE,
 	we may allocate one fewer block than requested. When
 	it is bigger, we may allocate more blocks than requested. */
-	frame = static_cast<unsigned char *>(ut_align(chunk->mem, UNIV_PAGE_SIZE));
+	frame = ut_align(chunk->mem, UNIV_PAGE_SIZE);
 	chunk->size = chunk->mem_size / UNIV_PAGE_SIZE
 		- (frame != chunk->mem);

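A worked sketch of the alignment arithmetic above, using a local align_up() stand-in for ut_align(): if the chunk does not start on a page boundary, the first frame is rounded up and one page of the chunk is sacrificed, which is exactly what the "- (frame != chunk->mem)" term accounts for.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 16384UL  /* stand-in for UNIV_PAGE_SIZE */

/* Round ptr up to the next multiple of n (n must be a power of two). */
static void* align_up(void* ptr, unsigned long n)
{
	uintptr_t p = (uintptr_t) ptr;
	return (void*) ((p + n - 1) & ~(uintptr_t) (n - 1));
}

int main(void)
{
	static char   buf[5 * PAGE_SIZE];
	void*         mem      = buf + 100;  /* deliberately misaligned */
	unsigned long mem_size = 4 * PAGE_SIZE;

	void*         frame = align_up(mem, PAGE_SIZE);
	unsigned long size  = mem_size / PAGE_SIZE - (frame != mem);

	/* Prints 3: one of the four pages is lost to alignment. */
	printf("usable frames: %lu\n", size);
	return 0;
}
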
@@ -1293,2 +1294,2 @@
 	ulint	total_size,	/*!< in: size of the total pool in bytes */
 	ulint	n_instances)	/*!< in: number of instances */

@@ -1297,19 +1299,19 @@
-	const ulint	size = total_size / n_instances;
-
-	ut_ad(n_instances > 0);
-	ut_ad(n_instances <= MAX_BUFFER_POOLS);
-	ut_ad(n_instances == srv_buf_pool_instances);
-
 	/* We create an extra buffer pool instance, this instance is used
 	for flushing the flush lists, to keep track of n_flush for all
 	the buffer pools and also used as a waiting object during flushing. */
-	void*	buf_pool_void_ptr = mem_zalloc(n_instances * sizeof *buf_pool_ptr);
-	buf_pool_ptr = static_cast<buf_pool_struct *>(buf_pool_void_ptr);
-
 	for (i = 0; i < n_instances; i++) {
-		buf_pool_t*	ptr = &buf_pool_ptr[i];
+		buf_pool_t*	ptr;
+		ulint		size;
+
+		ptr = mem_zalloc(sizeof(*ptr));
+
+		size = total_size / n_instances;
+
+		buf_pool_ptr[i] = ptr;
 
 		if (buf_pool_init_instance(ptr, size, i) != DB_SUCCESS) {
 
+			mem_free(buf_pool_ptr[i]);
+
			/* Free all the instances created so far. */
 			buf_pool_free(i);

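Both sides split the configured total evenly, computing size as total_size / n_instances. A hedged sketch of the consequence of that integer division: a total that is not a multiple of the instance count is silently rounded down per instance.

#include <stdio.h>

int main(void)
{
	unsigned long total_size  = 1UL << 30; /* e.g. innodb_buffer_pool_size = 1 GiB */
	unsigned long n_instances = 3;         /* e.g. innodb_buffer_pool_instances */

	unsigned long size = total_size / n_instances;

	/* 3 * 357913941 = 1073741823: one byte of the request goes unused. */
	printf("per instance: %lu, configured: %lu, actual total: %lu\n",
	       size, total_size, size * n_instances);
	return 0;
}
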
@@ -1657,7 +1657,7 @@
 	buf_pool->old_pool_size = buf_pool->curr_pool_size;
 
 	/* Rewrite buf_pool->chunks. Copy everything but max_chunk. */
-	chunks = static_cast<buf_chunk_t *>(mem_alloc((buf_pool->n_chunks - 1) * sizeof *chunks));
+	chunks = mem_alloc((buf_pool->n_chunks - 1) * sizeof *chunks);
 	memcpy(chunks, buf_pool->chunks,
 	       (max_chunk - buf_pool->chunks) * sizeof *chunks);
 	memcpy(chunks + (max_chunk - buf_pool->chunks),

@@ -1882,7 +1882,7 @@
 	ut_ad(!bpage->in_page_hash);
 	ut_ad(bpage->buf_fix_count == 0);
 
-	/* bpage is pointing to buf_pool->watch[],
-	which is protected by buf_pool->mutex.
+	/* bpage is pointing to buf_pool_watch[],
+	which is protected by buf_pool_mutex.
 	Normally, buf_page_t objects are protected by
 	buf_block_t::mutex or buf_pool->zip_mutex or both. */

@@ -1942,6 +1942,6 @@
 	buf_chunk_t*	chunk;
 
 	buf_pool_mutex_enter(buf_pool);
-	chunks = static_cast<buf_chunk_t *>(mem_alloc((buf_pool->n_chunks + 1) * sizeof *chunks));
+	chunks = mem_alloc((buf_pool->n_chunks + 1) * sizeof *chunks);
 
 	memcpy(chunks, buf_pool->chunks, buf_pool->n_chunks * sizeof *chunks);

@@ -3009,44 +3009,4 @@
 	UNIV_MEM_ASSERT_RW(&block->page, sizeof block->page);
 
-#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
-	if ((mode == BUF_GET_IF_IN_POOL || mode == BUF_GET_IF_IN_POOL_OR_WATCH)
-	    && ibuf_debug) {
-		/* Try to evict the block from the buffer pool, to use the
-		insert buffer (change buffer) as much as possible. */
-
-		if (buf_LRU_free_block(&block->page, TRUE, NULL)
-		    == BUF_LRU_FREED) {
-			mutex_exit(&block->mutex);
-			if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
-				/* Set the watch, as it would have
-				been set if the page were not in the
-				buffer pool in the first place. */
-				block = (buf_block_t*) buf_pool_watch_set(
-					space, offset, fold);
-
-				if (UNIV_LIKELY_NULL(block)) {
-
-					/* The page entered the buffer
-					pool for some reason. Try to
-					evict it again. */
-					goto got_block;
-				}
-			}
-			buf_pool_mutex_exit(buf_pool);
-			fprintf(stderr,
-				"innodb_change_buffering_debug evict %u %u\n",
-				(unsigned) space, (unsigned) offset);
-			return(NULL);
-		} else if (buf_flush_page_try(buf_pool, block)) {
-			fprintf(stderr,
-				"innodb_change_buffering_debug flush %u %u\n",
-				(unsigned) space, (unsigned) offset);
-			guess = block;
-			goto loop;
-		}
-
-		/* Failed to evict the page; change it directly */
-	}
-#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
 
 	buf_block_buf_fix_inc(block, file, line);

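The block removed above drives the change-buffering debug mode: evict the page so a later access must go through the insert/change buffer; failing that, flush it so a retry can evict it; failing both, fall through and change the page directly. A schematic of that decision, with stub predicates standing in for buf_LRU_free_block() and buf_flush_page_try():

#include <stdio.h>
#include <stdbool.h>

struct page_stub { bool clean; bool flushable; };

static bool try_evict(struct page_stub* p) { return p->clean; }
static bool try_flush(struct page_stub* p) { return p->flushable; }

void change_buffering_debug_step(struct page_stub* p)
{
	if (try_evict(p)) {
		printf("evicted: the next read exercises the change buffer\n");
	} else if (try_flush(p)) {
		printf("flushed: the page is clean now, a retry can evict it\n");
	} else {
		printf("failed to evict the page; change it directly\n");
	}
}
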
@@ -3659,7 +3619,7 @@
 		mutex_exit(&block->mutex);
 		data = buf_buddy_alloc(buf_pool, zip_size, &lru);
 		mutex_enter(&block->mutex);
-		block->page.zip.data = static_cast<unsigned char *>(data);
+		block->page.zip.data = data;
 
 		/* To maintain the invariant
 		block->in_unzip_LRU_list

@@ -3682,10 +3642,10 @@
 		invocation of buf_buddy_relocate_block() on
 		uninitialized data. */
 		data = buf_buddy_alloc(buf_pool, zip_size, &lru);
-		bpage = static_cast<buf_page_struct *>(buf_buddy_alloc(buf_pool, sizeof *bpage, &lru));
+		bpage = buf_buddy_alloc(buf_pool, sizeof *bpage, &lru);
 
 		/* Initialize the buf_pool pointer. */
-		bpage->buf_pool_index = buf_pool_index(buf_pool);
+		bpage->buf_pool = buf_pool;
 
 		/* If buf_buddy_alloc() allocated storage from the LRU list,
 		it released and reacquired buf_pool->mutex. Thus, we must

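The comment ending this hunk warns about a classic hazard: a helper that releases and reacquires the pool mutex internally (as buf_buddy_alloc() may when it takes storage from the LRU list) invalidates any state observed before the call. A generic sketch of the pattern, with illustrative names only:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;
static int             pool_generation; /* state guarded by pool_mutex */

/* Drops the mutex around a blocking allocation, then retakes it. */
static void* alloc_may_release(size_t n)
{
	void* p;

	pthread_mutex_unlock(&pool_mutex);
	p = malloc(n);               /* other threads may run here */
	pthread_mutex_lock(&pool_mutex);
	return p;
}

void caller(void)
{
	pthread_mutex_lock(&pool_mutex);

	int   seen = pool_generation; /* snapshot taken under the mutex */
	void* p    = alloc_may_release(64);

	/* The snapshot may be stale: recheck anything the mutex was
	   protecting before acting on the earlier decision. */
	if (seen != pool_generation) {
		/* revalidate, or redo the earlier decision */
	}

	free(p);
	pthread_mutex_unlock(&pool_mutex);
}
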
@@ -3712,6 +3672,6 @@
 		page_zip_des_init(&bpage->zip);
 		page_zip_set_size(&bpage->zip, zip_size);
-		bpage->zip.data = static_cast<unsigned char *>(data);
+		bpage->zip.data = data;
 
 		mutex_enter(&buf_pool->zip_mutex);
 		UNIV_MEM_DESC(bpage->zip.data,

@@ -3865,7 +3825,7 @@
 		has been added to buf_pool->LRU and buf_pool->page_hash. */
 		data = buf_buddy_alloc(buf_pool, zip_size, &lru);
 		mutex_enter(&block->mutex);
-		block->page.zip.data = static_cast<unsigned char *>(data);
+		block->page.zip.data = data;
 
 		/* To maintain the invariant
 		block->in_unzip_LRU_list

@@ -4196,5 +4156,5 @@
 		write activity happening. */
 		if (buf_pool->n_flush[i] > 0) {
 			buf_pool_mutex_exit(buf_pool);
-			buf_flush_wait_batch_end(buf_pool, static_cast<buf_flush>(i));
+			buf_flush_wait_batch_end(buf_pool, i);
 			buf_pool_mutex_enter(buf_pool);

@@ -4906,6 +4866,6 @@
 		"LRU len: %lu, unzip_LRU len: %lu\n"
 		"I/O sum[%lu]:cur[%lu], unzip sum[%lu]:cur[%lu]\n",
-		static_cast<ulint>(UT_LIST_GET_LEN(buf_pool->LRU)),
-		static_cast<ulint>(UT_LIST_GET_LEN(buf_pool->unzip_LRU)),
+		UT_LIST_GET_LEN(buf_pool->LRU),
+		UT_LIST_GET_LEN(buf_pool->unzip_LRU),
 		buf_LRU_stat_sum.io, buf_LRU_stat_cur.io,
 		buf_LRU_stat_sum.unzip, buf_LRU_stat_cur.unzip);

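A plausible motive for the casts on the left side of this last hunk: variadic arguments undergo no conversion to the type the format string expects, so the value handed to "%lu" must really be an unsigned long. A small illustration of the general rule:

#include <stdio.h>

int main(void)
{
	unsigned int lru_len = 42; /* imagine a list-length macro yielding this */

	/* printf("LRU len: %lu\n", lru_len);  -- mismatched width, undefined
	   behaviour on LP64 systems where unsigned long is 64-bit */
	printf("LRU len: %lu\n", (unsigned long) lru_len); /* portable */
	return 0;
}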