Branch: ~drizzle-trunk/drizzle/development
Viewing changes to: plugin/innobase/buf/buf0buf.cc
Commit message: Refactor

@@ -52,6 +52,8 @@
 #include "log0recv.h"
 #include "page0zip.h"
 
+#include <drizzled/errmsg_print.h>
+
 /*
 		IMPLEMENTATION OF THE BUFFER POOL
 		=================================
@@ -172,7 +174,7 @@
 holding file pages that have been modified in the memory
 but not written to disk yet. The block with the oldest modification
 which has not yet been written to disk is at the end of the chain.
-The access to this list is protected by flush_list_mutex.
+The access to this list is protected by buf_pool->flush_list_mutex.
 
 The chain of unmodified compressed blocks (buf_pool->zip_clean)
 contains the control blocks (buf_page_t) of those compressed pages
@@ -246,8 +248,8 @@
 /** Number of attemtps made to read in a page in the buffer pool */
 static const ulint BUF_PAGE_READ_MAX_RETRIES = 100;
 
-/** The buffer buf_pool of the database */
-UNIV_INTERN buf_pool_t*	buf_pool_ptr[MAX_BUFFER_POOLS];
+/** The buffer pools of the database */
+UNIV_INTERN buf_pool_t*	buf_pool_ptr;
 
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 static ulint	buf_dbg_counter	= 0; /*!< This is used to insert validation
@@ -320,7 +322,8 @@
 
 	/* When we traverse all the flush lists we don't want another
 	thread to add a dirty page to any flush list. */
-	log_flush_order_mutex_enter();
+	if (srv_buf_pool_instances > 1)
+		log_flush_order_mutex_enter();
 
 	for (i = 0; i < srv_buf_pool_instances; i++) {
 		buf_pool_t*	buf_pool;
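
Note: the hunk above takes the log flush-order mutex only when more than one buffer pool instance exists. A minimal sketch of that pattern, with std::mutex and hypothetical names standing in for the InnoDB wrappers; the matching conditional release is assumed here and does not appear in this diff:

#include <mutex>

// Hypothetical stand-ins for the InnoDB global and mutex wrapper.
static unsigned long srv_buf_pool_instances = 2;
static std::mutex log_flush_order_mutex;

static void traverse_all_flush_lists()
{
	// Take the ordering mutex only when several buffer pool
	// instances could interleave their flush-list insertions.
	if (srv_buf_pool_instances > 1)
		log_flush_order_mutex.lock();

	/* ... walk buf_pool->flush_list for every instance ... */

	if (srv_buf_pool_instances > 1)
		log_flush_order_mutex.unlock();
}
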
@@ -527,21 +530,15 @@
 		     < mach_read_from_8(read_buf + FIL_PAGE_LSN))) {
 			ut_print_timestamp(stderr);
 
-			fprintf(stderr,
-				"  InnoDB: Error: page %lu log sequence number"
-				" %"PRIu64"\n"
-				"InnoDB: is in the future! Current system "
-				"log sequence number %"PRIu64".\n"
-				"InnoDB: Your database may be corrupt or "
-				"you may have copied the InnoDB\n"
-				"InnoDB: tablespace but not the InnoDB "
-				"log files. See\n"
-				"InnoDB: " REFMAN "forcing-recovery.html\n"
-				"InnoDB: for more information.\n",
-				(ulong) mach_read_from_4(read_buf
-							 + FIL_PAGE_OFFSET),
-				mach_read_from_8(read_buf + FIL_PAGE_LSN),
-				current_lsn);
+			drizzled::errmsg_printf(drizzled::error::INFO,
+						"InnoDB: Error: page %lu log sequence number %"PRIu64". "
+						"InnoDB: is in the future! Current system log sequence number %"PRIu64". "
+						"Your database may be corrupt or you may have copied the InnoDB tablespace but not the InnoDB log files. See "
+						" " REFMAN "forcing-recovery.html for more information. ",
+						(ulong) mach_read_from_4(read_buf
+									 + FIL_PAGE_OFFSET),
+						mach_read_from_8(read_buf + FIL_PAGE_LSN),
+						current_lsn);
 		}
 	}
 #endif
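
Note: this hunk collapses the multi-line fprintf(stderr, ...) diagnostic into a single drizzled::errmsg_printf() call. A rough sketch of the same consolidation, using a hypothetical printf-style logger rather than the actual Drizzle error API:

#include <cinttypes>
#include <cstdarg>
#include <cstdio>

// Hypothetical printf-style logger standing in for drizzled::errmsg_printf().
static void log_info(const char* fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	std::vfprintf(stderr, fmt, args);
	va_end(args);
}

static void report_future_lsn(unsigned long page_no,
			      uint64_t page_lsn, uint64_t current_lsn)
{
	// One call with one format string replaces the dozen adjacent
	// string literals previously written to stderr line by line.
	log_info("InnoDB: Error: page %lu log sequence number %" PRIu64
		 " is in the future! Current system log sequence number %" PRIu64 ".\n",
		 page_no, page_lsn, current_lsn);
}
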
@@ -858,7 +855,7 @@
 
 	block->frame = frame;
 
-	block->page.buf_pool = buf_pool;
+	block->page.buf_pool_index = buf_pool_index(buf_pool);
 	block->page.state = BUF_BLOCK_NOT_USED;
 	block->page.buf_fix_count = 0;
 	block->page.io_fix = BUF_IO_NONE;
@@ -941,14 +938,14 @@
 
 	/* Allocate the block descriptors from
 	the start of the memory block. */
-	chunk->blocks = chunk->mem;
+	chunk->blocks = static_cast<buf_block_struct *>(chunk->mem);
 
 	/* Align a pointer to the first frame.  Note that when
 	os_large_page_size is smaller than UNIV_PAGE_SIZE,
 	we may allocate one fewer block than requested.  When
 	it is bigger, we may allocate more blocks than requested. */
 
-	frame = ut_align(chunk->mem, UNIV_PAGE_SIZE);
+	frame = static_cast<unsigned char *>(ut_align(chunk->mem, UNIV_PAGE_SIZE));
 	chunk->size = chunk->mem_size / UNIV_PAGE_SIZE
 		- (frame != chunk->mem);
 
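
Note: the static_cast additions here (and in the later mem_alloc/mem_zalloc/buf_buddy_alloc hunks) are needed because C++, unlike C, does not implicitly convert void* to other object pointer types, so compiling this file as C++ requires spelling the conversion out. A standalone illustration of the rule, using calloc in place of the InnoDB allocators:

#include <cstdlib>

struct buf_block { unsigned char* frame; };

int main()
{
	void* mem = std::calloc(16, sizeof(buf_block));

	// In C, `blocks = mem;` compiles; in C++ the conversion from
	// void* must be written out, hence the casts added above.
	buf_block* blocks = static_cast<buf_block*>(mem);

	if (blocks != nullptr) {
		blocks[0].frame = nullptr;
	}
	std::free(mem);
	return 0;
}
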
@@ -1213,7 +1210,8 @@
 
 	if (buf_pool_size > 0) {
 		buf_pool->n_chunks = 1;
-		buf_pool->chunks = chunk = mem_zalloc(sizeof *chunk);
+		void *chunk_ptr= mem_zalloc((sizeof *chunk));
+		buf_pool->chunks = chunk = static_cast<buf_chunk_t *>(chunk_ptr);
 
 		UT_LIST_INIT(buf_pool->free);
 
@@ -1280,8 +1278,6 @@
 	mem_free(buf_pool->chunks);
 	hash_table_free(buf_pool->page_hash);
 	hash_table_free(buf_pool->zip_hash);
-	mem_free(buf_pool);
-	buf_pool = NULL;
 }
 
 /********************************************************************//**
@@ -1294,25 +1290,24 @@
 	ulint	total_size,	/*!< in: size of the total pool in bytes */
 	ulint	n_instances)	/*!< in: number of instances */
 {
-	ulint	i;
+	ulint		i;
+	const ulint	size	= total_size / n_instances;
+
+	ut_ad(n_instances > 0);
+	ut_ad(n_instances <= MAX_BUFFER_POOLS);
+	ut_ad(n_instances == srv_buf_pool_instances);
 
 	/* We create an extra buffer pool instance, this instance is used
 	for flushing the flush lists, to keep track of n_flush for all
 	the buffer pools and also used as a waiting object during flushing. */
+	void *buf_pool_void_ptr= mem_zalloc(n_instances * sizeof *buf_pool_ptr);
+	buf_pool_ptr = static_cast<buf_pool_struct *>(buf_pool_void_ptr);
+
 	for (i = 0; i < n_instances; i++) {
-		buf_pool_t*	ptr;
-		ulint		size;
-
-		ptr = mem_zalloc(sizeof(*ptr));
-
-		size = total_size / n_instances;
-
-		buf_pool_ptr[i] = ptr;
+		buf_pool_t*	ptr	= &buf_pool_ptr[i];
 
 		if (buf_pool_init_instance(ptr, size, i) != DB_SUCCESS) {
 
-			mem_free(buf_pool_ptr[i]);
-
 			/* Free all the instances created so far. */
 			buf_pool_free(i);
 
@@ -1341,8 +1336,10 @@
 
 	for (i = 0; i < n_instances; i++) {
 		buf_pool_free_instance(buf_pool_from_array(i));
-		buf_pool_ptr[i] = NULL;
 	}
+
+	mem_free(buf_pool_ptr);
+	buf_pool_ptr = NULL;
 }
 
 /********************************************************************//**
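
Note: read together, the buf_pool_ptr hunks replace a fixed array of separately allocated instances (buf_pool_t* buf_pool_ptr[MAX_BUFFER_POOLS], one mem_zalloc per instance) with a single contiguous zeroed allocation addressed as &buf_pool_ptr[i] and released with one mem_free. A hedged sketch of the new allocation shape, using calloc/free in place of the InnoDB memory wrappers and illustrative struct and function names:

#include <cstdlib>

// Illustrative only: buf_pool_t reduced to a stub.
struct buf_pool_t { unsigned long curr_size; };

static buf_pool_t* buf_pool_ptr = nullptr;	// one contiguous block

static void buf_pool_init_all(unsigned long n_instances)
{
	buf_pool_ptr = static_cast<buf_pool_t*>(
		std::calloc(n_instances, sizeof *buf_pool_ptr));
	if (buf_pool_ptr == nullptr) {
		return;
	}

	for (unsigned long i = 0; i < n_instances; i++) {
		buf_pool_t* ptr = &buf_pool_ptr[i];	// no per-instance allocation
		ptr->curr_size = 0;
	}
}

static void buf_pool_free_all()
{
	std::free(buf_pool_ptr);	// a single free releases every instance
	buf_pool_ptr = nullptr;
}
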
@@ -1657,7 +1654,7 @@
 	buf_pool->old_pool_size = buf_pool->curr_pool_size;
 
 	/* Rewrite buf_pool->chunks.  Copy everything but max_chunk. */
-	chunks = mem_alloc((buf_pool->n_chunks - 1) * sizeof *chunks);
+	chunks = static_cast<buf_chunk_t *>(mem_alloc((buf_pool->n_chunks - 1) * sizeof *chunks));
 	memcpy(chunks, buf_pool->chunks,
 	       (max_chunk - buf_pool->chunks) * sizeof *chunks);
 	memcpy(chunks + (max_chunk - buf_pool->chunks),
@@ -1882,8 +1879,8 @@
 			ut_ad(!bpage->in_page_hash);
 			ut_ad(bpage->buf_fix_count == 0);
 
-			/* bpage is pointing to buf_pool_watch[],
-			which is protected by buf_pool_mutex.
+			/* bpage is pointing to buf_pool->watch[],
+			which is protected by buf_pool->mutex.
 			Normally, buf_page_t objects are protected by
 			buf_block_t::mutex or buf_pool->zip_mutex or both. */
 
@@ -1942,7 +1939,7 @@
 	buf_chunk_t*	chunk;
 
 	buf_pool_mutex_enter(buf_pool);
-	chunks = mem_alloc((buf_pool->n_chunks + 1) * sizeof *chunks);
+	chunks = static_cast<buf_chunk_t *>(mem_alloc((buf_pool->n_chunks + 1) * sizeof *chunks));
 
 	memcpy(chunks, buf_pool->chunks, buf_pool->n_chunks * sizeof *chunks);
 
@@ -3008,6 +3005,46 @@
 	bytes. */
 	UNIV_MEM_ASSERT_RW(&block->page, sizeof block->page);
 #endif
+#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
+	if ((mode == BUF_GET_IF_IN_POOL || mode == BUF_GET_IF_IN_POOL_OR_WATCH)
+	    && ibuf_debug) {
+		/* Try to evict the block from the buffer pool, to use the
+		insert buffer (change buffer) as much as possible. */
+
+		if (buf_LRU_free_block(&block->page, TRUE, NULL)
+		    == BUF_LRU_FREED) {
+			mutex_exit(&block->mutex);
+			if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
+				/* Set the watch, as it would have
+				been set if the page were not in the
+				buffer pool in the first place. */
+				block = (buf_block_t*) buf_pool_watch_set(
+					space, offset, fold);
+
+				if (UNIV_LIKELY_NULL(block)) {
+
+					/* The page entered the buffer
+					pool for some reason. Try to
+					evict it again. */
+					goto got_block;
+				}
+			}
+			buf_pool_mutex_exit(buf_pool);
+			fprintf(stderr,
+				"innodb_change_buffering_debug evict %u %u\n",
+				(unsigned) space, (unsigned) offset);
+			return(NULL);
+		} else if (buf_flush_page_try(buf_pool, block)) {
+			fprintf(stderr,
+				"innodb_change_buffering_debug flush %u %u\n",
+				(unsigned) space, (unsigned) offset);
+			guess = block;
+			goto loop;
+		}
+
+		/* Failed to evict the page; change it directly */
+	}
+#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
 
 	buf_block_buf_fix_inc(block, file, line);
 
@@ -3619,7 +3656,7 @@
 			mutex_exit(&block->mutex);
 			data = buf_buddy_alloc(buf_pool, zip_size, &lru);
 			mutex_enter(&block->mutex);
-			block->page.zip.data = data;
+			block->page.zip.data = static_cast<unsigned char *>(data);
 
 			/* To maintain the invariant
 			block->in_unzip_LRU_list
@@ -3642,10 +3679,10 @@
 		invocation of buf_buddy_relocate_block() on
 		uninitialized data. */
 		data = buf_buddy_alloc(buf_pool, zip_size, &lru);
-		bpage = buf_buddy_alloc(buf_pool, sizeof *bpage, &lru);
+		bpage = static_cast<buf_page_struct *>(buf_buddy_alloc(buf_pool, sizeof *bpage, &lru));
 
 		/* Initialize the buf_pool pointer. */
-		bpage->buf_pool = buf_pool;
+		bpage->buf_pool_index = buf_pool_index(buf_pool);
 
 		/* If buf_buddy_alloc() allocated storage from the LRU list,
 		it released and reacquired buf_pool->mutex.  Thus, we must
@@ -3671,7 +3708,7 @@
 
 		page_zip_des_init(&bpage->zip);
 		page_zip_set_size(&bpage->zip, zip_size);
-		bpage->zip.data = data;
+		bpage->zip.data = static_cast<unsigned char *>(data);
 
 		mutex_enter(&buf_pool->zip_mutex);
 		UNIV_MEM_DESC(bpage->zip.data,
@@ -3825,7 +3862,7 @@
 		has been added to buf_pool->LRU and buf_pool->page_hash. */
 		data = buf_buddy_alloc(buf_pool, zip_size, &lru);
 		mutex_enter(&block->mutex);
-		block->page.zip.data = data;
+		block->page.zip.data = static_cast<unsigned char *>(data);
 
 		/* To maintain the invariant
 		block->in_unzip_LRU_list
@@ -4017,7 +4054,7 @@
 			recv_recover_page(TRUE, (buf_block_t*) bpage);
 		}
 
-		if (uncompressed && !recv_no_ibuf_operations) {
+		if (uncompressed && !recv_no_ibuf_operations && !srv_fake_write) {
 			ibuf_merge_or_delete_for_page(
 				(buf_block_t*) bpage, bpage->space,
 				bpage->offset, buf_page_get_zip_size(bpage),
@@ -4137,7 +4174,7 @@
 	buf_pool_t*	buf_pool)	/*!< in: buffer pool instance */
 {
 	ibool		freed;
-	enum buf_flush	i;
+	int	i;
 
 	buf_pool_mutex_enter(buf_pool);
 
@@ -4156,7 +4193,7 @@
 		write activity happening. */
 		if (buf_pool->n_flush[i] > 0) {
 			buf_pool_mutex_exit(buf_pool);
-			buf_flush_wait_batch_end(buf_pool, i);
+			buf_flush_wait_batch_end(buf_pool, static_cast<buf_flush>(i));
 			buf_pool_mutex_enter(buf_pool);
 		}
 	}
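
Note: replacing `enum buf_flush i` with a plain int and casting back at the call site works around the fact that C++, unlike C, neither allows ++ on an enum variable nor converts int to an enum implicitly. A small self-contained version of the loop; the enumerator names are illustrative, not necessarily the exact InnoDB set:

// Illustrative flush types; BUF_FLUSH_N_TYPES marks the end of the range.
enum buf_flush { BUF_FLUSH_LRU, BUF_FLUSH_SINGLE_PAGE, BUF_FLUSH_LIST, BUF_FLUSH_N_TYPES };

static void wait_batch_end(buf_flush type)
{
	(void) type;	// stand-in for buf_flush_wait_batch_end()
}

int main()
{
	// `for (enum buf_flush i = 0; ...; i++)` is valid C but not C++:
	// iterate with an int and cast back where the enum is required.
	for (int i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; i++) {
		wait_batch_end(static_cast<buf_flush>(i));
	}
	return 0;
}
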
@@ -4865,7 +4902,7 @@
 	fprintf(file,
 		"LRU len: %lu, unzip_LRU len: %lu\n"
 		"I/O sum[%lu]:cur[%lu], unzip sum[%lu]:cur[%lu]\n",
-		UT_LIST_GET_LEN(buf_pool->LRU),
-		UT_LIST_GET_LEN(buf_pool->unzip_LRU),
+		static_cast<ulint>(UT_LIST_GET_LEN(buf_pool->LRU)),
+		static_cast<ulint>(UT_LIST_GET_LEN(buf_pool->unzip_LRU)),
 		buf_LRU_stat_sum.io, buf_LRU_stat_cur.io,
 		buf_LRU_stat_sum.unzip, buf_LRU_stat_cur.unzip);
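
Note: the static_cast<ulint>() around UT_LIST_GET_LEN() keeps the variadic arguments in step with the %lu conversion specifiers; arguments passed through "..." are not converted to the type the format string expects, so a mismatched integer width is undefined behaviour. A minimal illustration, assuming ulint is unsigned long as in InnoDB:

#include <cstdio>

typedef unsigned long ulint;	// assumption: ulint is unsigned long

int main()
{
	unsigned int lru_len = 42;	// pretend the list-length macro yields this width

	// Cast so the value pushed through the varargs matches %lu exactly.
	std::printf("LRU len: %lu\n", static_cast<ulint>(lru_len));
	return 0;
}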