~drizzle-trunk/drizzle/development

Viewing changes to plugin/innobase/buf/buf0buf.c

  • Committer: Lee Bieber
  • Date: 2010-11-14 05:18:07 UTC
  • mfrom: (1921.4.12 catalogs)
  • Revision ID: kalebral@gmail.com-20101114051807-p69h40jbsn1byf84
Merge Brian - add execute with no return

84
84
The buffer buf_pool contains a single mutex which protects all the
85
85
control data structures of the buf_pool. The content of a buffer frame is
86
86
protected by a separate read-write lock in its control block, though.
87
 
These locks can be locked and unlocked without owning the buf_pool->mutex.
 
87
These locks can be locked and unlocked without owning the buf_pool mutex.
88
88
The OS events in the buf_pool struct can be waited for without owning the
89
 
buf_pool->mutex.
 
89
buf_pool mutex.
90
90
 
91
 
The buf_pool->mutex is a hot-spot in main memory, causing a lot of
 
91
The buf_pool mutex is a hot-spot in main memory, causing a lot of
92
92
memory bus traffic on multiprocessor systems when processors
93
93
alternately access the mutex. On our Pentium, the mutex is accessed
94
94
maybe every 10 microseconds. We gave up the solution to have mutexes
95
95
for each control block, for instance, because it seemed to be
96
96
complicated.
97
97
 
98
 
A solution to reduce mutex contention of the buf_pool->mutex is to
 
98
A solution to reduce mutex contention of the buf_pool mutex is to
99
99
create a separate mutex for the page hash table. On Pentium,
100
100
accessing the hash table takes 2 microseconds, about half
101
 
of the total buf_pool->mutex hold time.
 
101
of the total buf_pool mutex hold time.
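
A minimal standalone sketch of that split, using plain pthreads and
hypothetical names (pool_mutex, hash_mutex, page_hash_lookup) rather than
InnoDB's own sync primitives:

#include <pthread.h>
#include <stddef.h>

/* pool_mutex would still guard the LRU and free lists; hash_mutex guards
   only the page hash, so lookups stay off the hot pool mutex. */
static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t hash_mutex = PTHREAD_MUTEX_INITIALIZER;

struct page { unsigned space; unsigned offset; struct page *hash_next; };

static struct page *page_hash[1024];

static struct page *page_hash_lookup(unsigned space, unsigned offset)
{
        struct page *p;

        pthread_mutex_lock(&hash_mutex);        /* not pool_mutex */
        p = page_hash[(space ^ offset) % 1024];
        while (p && (p->space != space || p->offset != offset)) {
                p = p->hash_next;
        }
        pthread_mutex_unlock(&hash_mutex);

        return(p);
}

Since the hash lookup accounts for about half of the total hold time
quoted above, moving it under its own mutex roughly halves traffic on
the hot pool mutex.
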
102
102
 
103
103
                Control blocks
104
104
                --------------
153
153
which we can use when we want to artificially age a page in the
154
154
buf_pool. This is used if we know that some page is not needed
155
155
again for some time: we insert the block right after the pointer,
156
 
causing it to be replaced sooner than would normally be the case.
 
156
causing it to be replaced sooner than would noramlly be the case.
157
157
Currently this aging mechanism is used for read-ahead mechanism
158
158
of pages, and it can also be used when there is a scan of a full
159
159
table which cannot fit in the memory. Putting the pages near the
160
 
end of the LRU list, we make sure that most of the buf_pool stays
161
 
in the main memory, undisturbed.
 
160
of the LRU list, we make sure that most of the buf_pool stays in the
 
161
main memory, undisturbed.
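
As an illustration of that insertion point, a toy doubly-linked-list
sketch (hypothetical node type; not InnoDB's buf_LRU code):

#include <stddef.h>

struct node { struct node *prev; struct node *next; };

/* lru_old points into the middle of the LRU list; linking a block in
   right after it "ages" the page, so it is evicted sooner than a page
   inserted at the head. */
static void lru_insert_old(struct node *lru_old, struct node *blk)
{
        blk->prev = lru_old;
        blk->next = lru_old->next;
        if (lru_old->next != NULL) {
                lru_old->next->prev = blk;
        }
        lru_old->next = blk;
}
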
162
162
 
163
163
The unzip_LRU list contains a subset of the common LRU list.  The
164
164
blocks on the unzip_LRU list hold a compressed file page and the
172
172
holding file pages that have been modified in the memory
173
173
but not written to disk yet. The block with the oldest modification
174
174
which has not yet been written to disk is at the end of the chain.
175
 
The access to this list is protected by flush_list_mutex.
176
175
 
177
176
The chain of unmodified compressed blocks (buf_pool->zip_clean)
178
177
contains the control blocks (buf_page_t) of those compressed pages
247
246
static const ulint BUF_PAGE_READ_MAX_RETRIES = 100;
248
247
 
249
248
/** The buffer buf_pool of the database */
250
 
UNIV_INTERN buf_pool_t* buf_pool_ptr[MAX_BUFFER_POOLS];
 
249
UNIV_INTERN buf_pool_t* buf_pool = NULL;
 
250
 
 
251
/** mutex protecting the buffer pool struct and control blocks, except the
 
252
read-write lock in them */
 
253
UNIV_INTERN mutex_t             buf_pool_mutex;
 
254
/** mutex protecting the control blocks of compressed-only pages
 
255
(of type buf_page_t, not buf_block_t) */
 
256
UNIV_INTERN mutex_t             buf_pool_zip_mutex;
251
257
 
252
258
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
253
259
static ulint    buf_dbg_counter = 0; /*!< This is used to insert validation
254
 
                                        operations in execution in the
 
260
                                        operations in excution in the
255
261
                                        debug version */
 
262
/** Flag to forbid the release of the buffer pool mutex.
 
263
Protected by buf_pool_mutex. */
 
264
UNIV_INTERN ulint               buf_pool_mutex_exit_forbidden = 0;
256
265
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
257
266
#ifdef UNIV_DEBUG
258
267
/** If this is set TRUE, the program prints info whenever
260
269
UNIV_INTERN ibool               buf_debug_prints = FALSE;
261
270
#endif /* UNIV_DEBUG */
262
271
 
263
 
#ifdef UNIV_PFS_RWLOCK
264
 
/* Keys to register buffer block related rwlocks and mutexes with
265
 
performance schema */
266
 
UNIV_INTERN mysql_pfs_key_t     buf_block_lock_key;
267
 
# ifdef UNIV_SYNC_DEBUG
268
 
UNIV_INTERN mysql_pfs_key_t     buf_block_debug_latch_key;
269
 
# endif /* UNIV_SYNC_DEBUG */
270
 
#endif /* UNIV_PFS_RWLOCK */
271
 
 
272
 
#ifdef UNIV_PFS_MUTEX
273
 
UNIV_INTERN mysql_pfs_key_t     buffer_block_mutex_key;
274
 
UNIV_INTERN mysql_pfs_key_t     buf_pool_mutex_key;
275
 
UNIV_INTERN mysql_pfs_key_t     buf_pool_zip_mutex_key;
276
 
UNIV_INTERN mysql_pfs_key_t     flush_list_mutex_key;
277
 
#endif /* UNIV_PFS_MUTEX */
278
 
 
279
 
#if defined UNIV_PFS_MUTEX || defined UNIV_PFS_RWLOCK
280
 
# ifndef PFS_SKIP_BUFFER_MUTEX_RWLOCK
281
 
 
282
 
/* Buffer block mutexes and rwlocks can be registered
283
 
in one group rather than individually. If PFS_GROUP_BUFFER_SYNC
284
 
is defined, register buffer block mutex and rwlock
285
 
in one group after their initialization. */
286
 
#  define PFS_GROUP_BUFFER_SYNC
287
 
 
288
 
/* This define caps the number of mutexes/rwlocks can
289
 
be registered with performance schema. Developers can
290
 
modify this define if necessary. Please note, this would
291
 
be effective only if PFS_GROUP_BUFFER_SYNC is defined. */
292
 
#  define PFS_MAX_BUFFER_MUTEX_LOCK_REGISTER    ULINT_MAX
293
 
 
294
 
# endif /* !PFS_SKIP_BUFFER_MUTEX_RWLOCK */
295
 
#endif /* UNIV_PFS_MUTEX || UNIV_PFS_RWLOCK */
296
 
 
297
272
/** A chunk of buffers.  The buffer pool is allocated in chunks. */
298
273
struct buf_chunk_struct{
299
274
        ulint           mem_size;       /*!< allocated size of the chunk */
305
280
#endif /* !UNIV_HOTBACKUP */
306
281
 
307
282
/********************************************************************//**
308
 
Gets the smallest oldest_modification lsn for any page in the pool. Returns
309
 
zero if all modified pages have been flushed to disk.
310
 
@return oldest modification in pool, zero if none */
311
 
UNIV_INTERN
312
 
ib_uint64_t
313
 
buf_pool_get_oldest_modification(void)
314
 
/*==================================*/
315
 
{
316
 
        ulint           i;
317
 
        buf_page_t*     bpage;
318
 
        ib_uint64_t     lsn = 0;
319
 
        ib_uint64_t     oldest_lsn = 0;
320
 
 
321
 
        /* When we traverse all the flush lists we don't want another
322
 
        thread to add a dirty page to any flush list. */
323
 
        log_flush_order_mutex_enter();
324
 
 
325
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
326
 
                buf_pool_t*     buf_pool;
327
 
 
328
 
                buf_pool = buf_pool_from_array(i);
329
 
 
330
 
                buf_flush_list_mutex_enter(buf_pool);
331
 
 
332
 
                bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
333
 
 
334
 
                if (bpage != NULL) {
335
 
                        ut_ad(bpage->in_flush_list);
336
 
                        lsn = bpage->oldest_modification;
337
 
                }
338
 
 
339
 
                buf_flush_list_mutex_exit(buf_pool);
340
 
 
341
 
                if (!oldest_lsn || oldest_lsn > lsn) {
342
 
                        oldest_lsn = lsn;
343
 
                }
344
 
        }
345
 
 
346
 
        log_flush_order_mutex_exit();
347
 
 
348
 
        /* The returned answer may be out of date: the flush_list can
349
 
        change after the mutex has been released. */
350
 
 
351
 
        return(oldest_lsn);
352
 
}
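
A hedged sketch of how a caller might consume that value (hypothetical
function; the real checkpoint logic lives in the log module):

static ib_uint64_t checkpoint_limit_example(ib_uint64_t log_end_lsn)
{
        ib_uint64_t     oldest = buf_pool_get_oldest_modification();

        /* Zero means every modified page has reached disk, so the
           checkpoint may advance to the end of the log; otherwise it
           must stop short of the oldest unflushed change. */
        return(oldest == 0 ? log_end_lsn : oldest);
}

As the comment in the function notes, the answer can already be out of
date when returned, so callers treat it as a conservative bound rather
than an exact state.
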
353
 
 
354
 
/********************************************************************//**
355
 
Get total buffer pool statistics. */
356
 
UNIV_INTERN
357
 
void
358
 
buf_get_total_list_len(
359
 
/*===================*/
360
 
        ulint*          LRU_len,        /*!< out: length of all LRU lists */
361
 
        ulint*          free_len,       /*!< out: length of all free lists */
362
 
        ulint*          flush_list_len) /*!< out: length of all flush lists */
363
 
{
364
 
        ulint           i;
365
 
 
366
 
        *LRU_len = 0;
367
 
        *free_len = 0;
368
 
        *flush_list_len = 0;
369
 
 
370
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
371
 
                buf_pool_t*     buf_pool;
372
 
 
373
 
                buf_pool = buf_pool_from_array(i);
374
 
                *LRU_len += UT_LIST_GET_LEN(buf_pool->LRU);
375
 
                *free_len += UT_LIST_GET_LEN(buf_pool->free);
376
 
                *flush_list_len += UT_LIST_GET_LEN(buf_pool->flush_list);
377
 
        }
378
 
}
379
 
 
380
 
/********************************************************************//**
381
 
Get total buffer pool statistics. */
382
 
UNIV_INTERN
383
 
void
384
 
buf_get_total_stat(
385
 
/*===============*/
386
 
        buf_pool_stat_t*        tot_stat)       /*!< out: buffer pool stats */
387
 
{
388
 
        ulint                   i;
389
 
 
390
 
        memset(tot_stat, 0, sizeof(*tot_stat));
391
 
 
392
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
393
 
                buf_pool_stat_t*buf_stat;
394
 
                buf_pool_t*     buf_pool;
395
 
 
396
 
                buf_pool = buf_pool_from_array(i);
397
 
 
398
 
                buf_stat = &buf_pool->stat;
399
 
                tot_stat->n_page_gets += buf_stat->n_page_gets;
400
 
                tot_stat->n_pages_read += buf_stat->n_pages_read;
401
 
                tot_stat->n_pages_written += buf_stat->n_pages_written;
402
 
                tot_stat->n_pages_created += buf_stat->n_pages_created;
403
 
                tot_stat->n_ra_pages_read += buf_stat->n_ra_pages_read;
404
 
                tot_stat->n_ra_pages_evicted += buf_stat->n_ra_pages_evicted;
405
 
                tot_stat->n_pages_made_young += buf_stat->n_pages_made_young;
406
 
 
407
 
                tot_stat->n_pages_not_made_young +=
408
 
                        buf_stat->n_pages_not_made_young;
409
 
        }
410
 
}
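
A hypothetical monitoring snippet built on the two aggregators above
(illustrative only; the name and output format are not from this
codebase):

static void print_pool_totals(void)
{
        ulint           lru_len;
        ulint           free_len;
        ulint           flush_len;
        buf_pool_stat_t tot;

        buf_get_total_list_len(&lru_len, &free_len, &flush_len);
        buf_get_total_stat(&tot);

        fprintf(stderr, "LRU %lu free %lu flush %lu reads %lu writes %lu\n",
                (ulong) lru_len, (ulong) free_len, (ulong) flush_len,
                (ulong) tot.n_pages_read, (ulong) tot.n_pages_written);
}
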
411
 
 
412
 
/********************************************************************//**
413
 
Allocates a buffer block.
414
 
@return own: the allocated block, in state BUF_BLOCK_MEMORY */
415
 
UNIV_INTERN
416
 
buf_block_t*
417
 
buf_block_alloc(
418
 
/*============*/
419
 
        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
420
 
        ulint           zip_size)       /*!< in: compressed page size in bytes,
421
 
                                        or 0 if uncompressed tablespace */
422
 
{
423
 
        buf_block_t*    block;
424
 
        ulint           index;
425
 
        static ulint    buf_pool_index;
426
 
 
427
 
        if (buf_pool == NULL) {
428
 
                /* We are allocating memory from any buffer pool, ensure
429
 
                we spread the grace on all buffer pool instances. */
430
 
                index = buf_pool_index++ % srv_buf_pool_instances;
431
 
                buf_pool = buf_pool_from_array(index);
432
 
        }
433
 
 
434
 
        block = buf_LRU_get_free_block(buf_pool, zip_size);
435
 
 
436
 
        buf_block_set_state(block, BUF_BLOCK_MEMORY);
437
 
 
438
 
        return(block);
439
 
}
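
The NULL-pool branch above picks an instance round-robin.  The same
pattern in isolation (hypothetical standalone form):

/* A static counter modulo the instance count spreads allocations over
   all pools.  The increment is unsynchronized, as in the code above;
   an occasional lost increment merely skews the spread. */
static unsigned long rr_counter;

static unsigned long pick_instance(unsigned long n_instances)
{
        return(rr_counter++ % n_instances);
}
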
440
 
 
441
 
/********************************************************************//**
442
283
Calculates a page checksum which is stored to the page when it is written
443
284
to a file. Note that we must be careful to calculate the same value on
444
285
32-bit and 64-bit architectures.
522
363
                ib_uint64_t     current_lsn;
523
364
 
524
365
                if (log_peek_lsn(&current_lsn)
525
 
                    && UNIV_UNLIKELY
526
 
                    (current_lsn
527
 
                     < mach_read_from_8(read_buf + FIL_PAGE_LSN))) {
 
366
                    && current_lsn < mach_read_ull(read_buf + FIL_PAGE_LSN)) {
528
367
                        ut_print_timestamp(stderr);
529
368
 
530
369
                        fprintf(stderr,
540
379
                                "InnoDB: for more information.\n",
541
380
                                (ulong) mach_read_from_4(read_buf
542
381
                                                         + FIL_PAGE_OFFSET),
543
 
                                mach_read_from_8(read_buf + FIL_PAGE_LSN),
 
382
                                mach_read_ull(read_buf + FIL_PAGE_LSN),
544
383
                                current_lsn);
545
384
                }
546
385
        }
737
576
#endif /* !UNIV_HOTBACKUP */
738
577
 
739
578
        switch (fil_page_get_type(read_buf)) {
740
 
                index_id_t      index_id;
741
579
        case FIL_PAGE_INDEX:
742
 
                index_id = btr_page_get_index_id(read_buf);
743
580
                fprintf(stderr,
744
581
                        "InnoDB: Page may be an index page where"
745
 
                        " index id is %llu\n",
746
 
                        (ullint) index_id);
 
582
                        " index id is %lu %lu\n",
 
583
                        (ulong) ut_dulint_get_high(
 
584
                                btr_page_get_index_id(read_buf)),
 
585
                        (ulong) ut_dulint_get_low(
 
586
                                btr_page_get_index_id(read_buf)));
747
587
#ifndef UNIV_HOTBACKUP
748
 
                index = dict_index_find_on_id_low(index_id);
 
588
                index = dict_index_find_on_id_low(
 
589
                        btr_page_get_index_id(read_buf));
749
590
                if (index) {
750
591
                        fputs("InnoDB: (", stderr);
751
592
                        dict_index_name_print(stderr, NULL, index);
797
638
}
798
639
 
799
640
#ifndef UNIV_HOTBACKUP
800
 
 
801
 
# ifdef PFS_GROUP_BUFFER_SYNC
802
 
/********************************************************************//**
803
 
This function registers mutexes and rwlocks in buffer blocks with
804
 
performance schema. If PFS_MAX_BUFFER_MUTEX_LOCK_REGISTER is
805
 
defined to be a value less than chunk->size, then only mutexes
806
 
and rwlocks in the first PFS_MAX_BUFFER_MUTEX_LOCK_REGISTER
807
 
blocks are registered. */
808
 
static
809
 
void
810
 
pfs_register_buffer_block(
811
 
/*======================*/
812
 
        buf_chunk_t*    chunk)          /*!< in/out: chunk of buffers */
813
 
{
814
 
        ulint           i;
815
 
        ulint           num_to_register;
816
 
        buf_block_t*    block;
817
 
 
818
 
        block = chunk->blocks;
819
 
 
820
 
        num_to_register = ut_min(chunk->size,
821
 
                                 PFS_MAX_BUFFER_MUTEX_LOCK_REGISTER);
822
 
 
823
 
        for (i = 0; i < num_to_register; i++) {
824
 
                mutex_t*        mutex;
825
 
                rw_lock_t*      rwlock;
826
 
 
827
 
#  ifdef UNIV_PFS_MUTEX
828
 
                mutex = &block->mutex;
829
 
                ut_a(!mutex->pfs_psi);
830
 
                mutex->pfs_psi = (PSI_server)
831
 
                        ? PSI_server->init_mutex(buffer_block_mutex_key, mutex)
832
 
                        : NULL;
833
 
#  endif /* UNIV_PFS_MUTEX */
834
 
 
835
 
#  ifdef UNIV_PFS_RWLOCK
836
 
                rwlock = &block->lock;
837
 
                ut_a(!rwlock->pfs_psi);
838
 
                rwlock->pfs_psi = (PSI_server)
839
 
                        ? PSI_server->init_rwlock(buf_block_lock_key, rwlock)
840
 
                        : NULL;
841
 
#  endif /* UNIV_PFS_RWLOCK */
842
 
                block++;
843
 
        }
844
 
}
845
 
# endif /* PFS_GROUP_BUFFER_SYNC */
846
 
 
847
641
/********************************************************************//**
848
642
Initializes a buffer control block when the buf_pool is created. */
849
643
static
850
644
void
851
645
buf_block_init(
852
646
/*===========*/
853
 
        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
854
 
        buf_block_t*    block,          /*!< in: pointer to control block */
855
 
        byte*           frame)          /*!< in: pointer to buffer frame */
 
647
        buf_block_t*    block,  /*!< in: pointer to control block */
 
648
        byte*           frame)  /*!< in: pointer to buffer frame */
856
649
{
857
650
        UNIV_MEM_DESC(frame, UNIV_PAGE_SIZE, block);
858
651
 
859
652
        block->frame = frame;
860
653
 
861
 
        block->page.buf_pool = buf_pool;
862
654
        block->page.state = BUF_BLOCK_NOT_USED;
863
655
        block->page.buf_fix_count = 0;
864
656
        block->page.io_fix = BUF_IO_NONE;
872
664
        block->check_index_page_at_flush = FALSE;
873
665
        block->index = NULL;
874
666
 
875
 
        block->is_hashed = FALSE;
876
 
 
877
667
#ifdef UNIV_DEBUG
878
668
        block->page.in_page_hash = FALSE;
879
669
        block->page.in_zip_hash = FALSE;
887
677
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
888
678
        page_zip_des_init(&block->page.zip);
889
679
 
890
 
#if defined PFS_SKIP_BUFFER_MUTEX_RWLOCK || defined PFS_GROUP_BUFFER_SYNC
891
 
        /* If PFS_SKIP_BUFFER_MUTEX_RWLOCK is defined, skip registration
892
 
        of buffer block mutex/rwlock with performance schema. If
893
 
        PFS_GROUP_BUFFER_SYNC is defined, skip the registration
894
 
        since buffer block mutex/rwlock will be registered later in
895
 
        pfs_register_buffer_block() */
896
 
 
897
 
        mutex_create(PFS_NOT_INSTRUMENTED, &block->mutex, SYNC_BUF_BLOCK);
898
 
        rw_lock_create(PFS_NOT_INSTRUMENTED, &block->lock, SYNC_LEVEL_VARYING);
899
 
#else /* PFS_SKIP_BUFFER_MUTEX_RWLOCK || PFS_GROUP_BUFFER_SYNC */
900
 
        mutex_create(buffer_block_mutex_key, &block->mutex, SYNC_BUF_BLOCK);
901
 
        rw_lock_create(buf_block_lock_key, &block->lock, SYNC_LEVEL_VARYING);
902
 
#endif /* PFS_SKIP_BUFFER_MUTEX_RWLOCK || PFS_GROUP_BUFFER_SYNC */
903
 
 
 
680
        mutex_create(&block->mutex, SYNC_BUF_BLOCK);
 
681
 
 
682
        rw_lock_create(&block->lock, SYNC_LEVEL_VARYING);
904
683
        ut_ad(rw_lock_validate(&(block->lock)));
905
684
 
906
685
#ifdef UNIV_SYNC_DEBUG
907
 
        rw_lock_create(buf_block_debug_latch_key,
908
 
                       &block->debug_latch, SYNC_NO_ORDER_CHECK);
 
686
        rw_lock_create(&block->debug_latch, SYNC_NO_ORDER_CHECK);
909
687
#endif /* UNIV_SYNC_DEBUG */
910
688
}
911
689
 
916
694
buf_chunk_t*
917
695
buf_chunk_init(
918
696
/*===========*/
919
 
        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
920
697
        buf_chunk_t*    chunk,          /*!< out: chunk of buffers */
921
698
        ulint           mem_size)       /*!< in: requested size in bytes */
922
699
{
972
749
 
973
750
        for (i = chunk->size; i--; ) {
974
751
 
975
 
                buf_block_init(buf_pool, block, frame);
 
752
                buf_block_init(block, frame);
976
753
 
977
754
#ifdef HAVE_VALGRIND
978
755
                /* Wipe contents of frame to eliminate a Purify warning */
980
757
#endif
981
758
                /* Add the block to the free list */
982
759
                UT_LIST_ADD_LAST(list, buf_pool->free, (&block->page));
983
 
 
984
760
                ut_d(block->page.in_free_list = TRUE);
985
 
                ut_ad(buf_pool_from_block(block) == buf_pool);
986
761
 
987
762
                block++;
988
763
                frame += UNIV_PAGE_SIZE;
989
764
        }
990
765
 
991
 
#ifdef PFS_GROUP_BUFFER_SYNC
992
 
        pfs_register_buffer_block(chunk);
993
 
#endif
994
766
        return(chunk);
995
767
}
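
A toy version of the initialization loop above, showing the layout of
one control block per UNIV_PAGE_SIZE frame and the free-list linking
(hypothetical types; alignment and debug hooks omitted):

struct toy_block { unsigned char *frame; struct toy_block *next_free; };

static struct toy_block *toy_free_list;

static void toy_chunk_init(struct toy_block *blocks, unsigned char *frames,
                           unsigned long n_pages, unsigned long page_size)
{
        unsigned long   i;

        for (i = 0; i < n_pages; i++) {
                blocks[i].frame = frames + i * page_size;
                blocks[i].next_free = toy_free_list;    /* add to free list */
                toy_free_list = &blocks[i];
        }
}
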
996
768
 
1009
781
        buf_block_t*    block;
1010
782
        ulint           i;
1011
783
 
 
784
        ut_ad(buf_pool);
 
785
        ut_ad(buf_pool_mutex_own());
 
786
 
1012
787
        block = chunk->blocks;
1013
788
 
1014
789
        for (i = chunk->size; i--; block++) {
1029
804
buf_block_t*
1030
805
buf_pool_contains_zip(
1031
806
/*==================*/
1032
 
        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
1033
 
        const void*     data)           /*!< in: pointer to compressed page */
 
807
        const void*     data)   /*!< in: pointer to compressed page */
1034
808
{
1035
809
        ulint           n;
1036
810
        buf_chunk_t*    chunk = buf_pool->chunks;
1037
811
 
1038
 
        ut_ad(buf_pool);
1039
 
        ut_ad(buf_pool_mutex_own(buf_pool));
1040
812
        for (n = buf_pool->n_chunks; n--; chunk++) {
1041
 
 
1042
813
                buf_block_t* block = buf_chunk_contains_zip(chunk, data);
1043
814
 
1044
815
                if (block) {
1062
833
        buf_block_t*    block;
1063
834
        ulint           i;
1064
835
 
 
836
        ut_ad(buf_pool);
 
837
        ut_ad(buf_pool_mutex_own());
 
838
 
1065
839
        block = chunk->blocks;
1066
840
 
1067
841
        for (i = chunk->size; i--; block++) {
1111
885
        const buf_block_t*      block;
1112
886
        ulint                   i;
1113
887
 
 
888
        ut_ad(buf_pool);
 
889
        ut_ad(buf_pool_mutex_own());
 
890
 
1114
891
        block = chunk->blocks;
1115
892
 
1116
893
        for (i = chunk->size; i--; block++) {
1130
907
void
1131
908
buf_chunk_free(
1132
909
/*===========*/
1133
 
        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
1134
910
        buf_chunk_t*    chunk)          /*!< out: chunk of buffers */
1135
911
{
1136
912
        buf_block_t*            block;
1137
913
        const buf_block_t*      block_end;
1138
914
 
1139
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
915
        ut_ad(buf_pool_mutex_own());
1140
916
 
1141
917
        block_end = chunk->blocks + chunk->size;
1142
918
 
1164
940
}
1165
941
 
1166
942
/********************************************************************//**
1167
 
Set buffer pool size variables after resizing it */
1168
 
static
1169
 
void
1170
 
buf_pool_set_sizes(void)
1171
 
/*====================*/
1172
 
{
1173
 
        ulint   i;
1174
 
        ulint   curr_size = 0;
1175
 
 
1176
 
        buf_pool_mutex_enter_all();
1177
 
 
1178
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
1179
 
                buf_pool_t*     buf_pool;
1180
 
 
1181
 
                buf_pool = buf_pool_from_array(i);
1182
 
                curr_size += buf_pool->curr_pool_size;
1183
 
        }
1184
 
 
1185
 
        srv_buf_pool_curr_size = curr_size;
1186
 
        srv_buf_pool_old_size = srv_buf_pool_size;
1187
 
 
1188
 
        buf_pool_mutex_exit_all();
1189
 
}
1190
 
 
1191
 
/********************************************************************//**
1192
 
Initialize a buffer pool instance.
1193
 
@return DB_SUCCESS if all goes well. */
1194
 
static
1195
 
ulint
1196
 
buf_pool_init_instance(
1197
 
/*===================*/
1198
 
        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
1199
 
        ulint           buf_pool_size,  /*!< in: size in bytes */
1200
 
        ulint           instance_no)    /*!< in: id of the instance */
1201
 
{
 
943
Creates the buffer pool.
 
944
@return own: buf_pool object, NULL if not enough memory or error */
 
945
UNIV_INTERN
 
946
buf_pool_t*
 
947
buf_pool_init(void)
 
948
/*===============*/
 
949
{
 
950
        buf_chunk_t*    chunk;
1202
951
        ulint           i;
1203
 
        buf_chunk_t*    chunk;
 
952
 
 
953
        buf_pool = mem_zalloc(sizeof(buf_pool_t));
1204
954
 
1205
955
        /* 1. Initialize general fields
1206
956
        ------------------------------- */
1207
 
        mutex_create(buf_pool_mutex_key,
1208
 
                     &buf_pool->mutex, SYNC_BUF_POOL);
1209
 
        mutex_create(buf_pool_zip_mutex_key,
1210
 
                     &buf_pool->zip_mutex, SYNC_BUF_BLOCK);
1211
 
 
1212
 
        buf_pool_mutex_enter(buf_pool);
1213
 
 
1214
 
        if (buf_pool_size > 0) {
1215
 
                buf_pool->n_chunks = 1;
1216
 
                buf_pool->chunks = chunk = mem_zalloc(sizeof *chunk);
1217
 
 
1218
 
                UT_LIST_INIT(buf_pool->free);
1219
 
 
1220
 
                if (!buf_chunk_init(buf_pool, chunk, buf_pool_size)) {
1221
 
                        mem_free(chunk);
1222
 
                        mem_free(buf_pool);
1223
 
 
1224
 
                        buf_pool_mutex_exit(buf_pool);
1225
 
 
1226
 
                        return(DB_ERROR);
1227
 
                }
1228
 
 
1229
 
                buf_pool->instance_no = instance_no;
1230
 
                buf_pool->old_pool_size = buf_pool_size;
1231
 
                buf_pool->curr_size = chunk->size;
1232
 
                buf_pool->curr_pool_size = buf_pool->curr_size * UNIV_PAGE_SIZE;
1233
 
 
1234
 
                buf_pool->page_hash = hash_create(2 * buf_pool->curr_size);
1235
 
                buf_pool->zip_hash = hash_create(2 * buf_pool->curr_size);
1236
 
                
1237
 
                buf_pool->last_printout_time = ut_time();
 
957
        mutex_create(&buf_pool_mutex, SYNC_BUF_POOL);
 
958
        mutex_create(&buf_pool_zip_mutex, SYNC_BUF_BLOCK);
 
959
 
 
960
        buf_pool_mutex_enter();
 
961
 
 
962
        buf_pool->n_chunks = 1;
 
963
        buf_pool->chunks = chunk = mem_alloc(sizeof *chunk);
 
964
 
 
965
        UT_LIST_INIT(buf_pool->free);
 
966
 
 
967
        if (!buf_chunk_init(chunk, srv_buf_pool_size)) {
 
968
                mem_free(chunk);
 
969
                mem_free(buf_pool);
 
970
                buf_pool = NULL;
 
971
                return(NULL);
1238
972
        }
 
973
 
 
974
        srv_buf_pool_old_size = srv_buf_pool_size;
 
975
        buf_pool->curr_size = chunk->size;
 
976
        srv_buf_pool_curr_size = buf_pool->curr_size * UNIV_PAGE_SIZE;
 
977
 
 
978
        buf_pool->page_hash = hash_create(2 * buf_pool->curr_size);
 
979
        buf_pool->zip_hash = hash_create(2 * buf_pool->curr_size);
 
980
 
 
981
        buf_pool->last_printout_time = time(NULL);
 
982
 
1239
983
        /* 2. Initialize flushing fields
1240
984
        -------------------------------- */
1241
985
 
1242
 
        mutex_create(flush_list_mutex_key, &buf_pool->flush_list_mutex,
1243
 
                     SYNC_BUF_FLUSH_LIST);
1244
 
 
1245
986
        for (i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; i++) {
1246
987
                buf_pool->no_flush[i] = os_event_create(NULL);
1247
988
        }
1248
989
 
1249
990
        /* 3. Initialize LRU fields
1250
991
        --------------------------- */
1251
 
 
1252
 
        /* All fields are initialized by mem_zalloc(). */
1253
 
 
1254
 
        buf_pool_mutex_exit(buf_pool);
1255
 
 
1256
 
        return(DB_SUCCESS);
 
992
        /* All fields are initialized by mem_zalloc(). */
 
993
 
 
994
        buf_pool_mutex_exit();
 
995
 
 
996
        btr_search_sys_create(buf_pool->curr_size
 
997
                              * UNIV_PAGE_SIZE / sizeof(void*) / 64);
 
998
 
 
999
        /* 4. Initialize the buddy allocator fields */
 
1000
        /* All fields are initialized by mem_zalloc(). */
 
1001
 
 
1002
        return(buf_pool);
1257
1003
}
1258
1004
 
1259
1005
/********************************************************************//**
1260
 
free one buffer pool instance */
1261
 
static
 
1006
Frees the buffer pool at shutdown.  This must not be invoked before
 
1007
freeing all mutexes. */
 
1008
UNIV_INTERN
1262
1009
void
1263
 
buf_pool_free_instance(
1264
 
/*===================*/
1265
 
        buf_pool_t*     buf_pool)       /* in,own: buffer pool instance
1266
 
                                        to free */
 
1010
buf_pool_free(void)
 
1011
/*===============*/
1267
1012
{
1268
1013
        buf_chunk_t*    chunk;
1269
1014
        buf_chunk_t*    chunks;
1285
1030
}
1286
1031
 
1287
1032
/********************************************************************//**
1288
 
Creates the buffer pool.
1289
 
@return DB_SUCCESS if success, DB_ERROR if not enough memory or error */
1290
 
UNIV_INTERN
1291
 
ulint
1292
 
buf_pool_init(
1293
 
/*==========*/
1294
 
        ulint   total_size,     /*!< in: size of the total pool in bytes */
1295
 
        ulint   n_instances)    /*!< in: number of instances */
1296
 
{
1297
 
        ulint   i;
1298
 
 
1299
 
        /* We create an extra buffer pool instance, this instance is used
1300
 
        for flushing the flush lists, to keep track of n_flush for all
1301
 
        the buffer pools and also used as a waiting object during flushing. */
1302
 
        for (i = 0; i < n_instances; i++) {
1303
 
                buf_pool_t*     ptr;
1304
 
                ulint           size;
1305
 
 
1306
 
                ptr = mem_zalloc(sizeof(*ptr));
1307
 
 
1308
 
                size = total_size / n_instances;
1309
 
 
1310
 
                buf_pool_ptr[i] = ptr;
1311
 
 
1312
 
                if (buf_pool_init_instance(ptr, size, i) != DB_SUCCESS) {
1313
 
 
1314
 
                        mem_free(buf_pool_ptr[i]);
1315
 
 
1316
 
                        /* Free all the instances created so far. */
1317
 
                        buf_pool_free(i);
1318
 
 
1319
 
                        return(DB_ERROR);
1320
 
                }
1321
 
        }
1322
 
 
1323
 
        buf_pool_set_sizes();
1324
 
        buf_LRU_old_ratio_update(100 * 3/ 8, FALSE);
1325
 
 
1326
 
        btr_search_sys_create(buf_pool_get_curr_size() / sizeof(void*) / 64);
1327
 
 
1328
 
        return(DB_SUCCESS);
1329
 
}
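
The shape of that loop, reduced to a standalone sketch (pool_create_one
and pools_destroy are hypothetical stand-ins for buf_pool_init_instance()
and buf_pool_free()):

int pool_create_one(unsigned long size, unsigned long id);  /* hypothetical */
void pools_destroy(unsigned long n_created);                /* hypothetical */

static int pools_create(unsigned long total_size, unsigned long n)
{
        unsigned long   i;

        for (i = 0; i < n; i++) {
                /* each instance gets an equal share of the total */
                if (pool_create_one(total_size / n, i) != 0) {
                        pools_destroy(i);       /* unwind instances 0..i-1 */
                        return(-1);
                }
        }

        return(0);
}
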
1330
 
 
1331
 
/********************************************************************//**
1332
 
Frees the buffer pool at shutdown.  This must not be invoked before
1333
 
freeing all mutexes. */
1334
 
UNIV_INTERN
1335
 
void
1336
 
buf_pool_free(
1337
 
/*==========*/
1338
 
        ulint   n_instances)    /*!< in: number of instances to free */
1339
 
{
1340
 
        ulint   i;
1341
 
 
1342
 
        for (i = 0; i < n_instances; i++) {
1343
 
                buf_pool_free_instance(buf_pool_from_array(i));
1344
 
                buf_pool_ptr[i] = NULL;
1345
 
        }
1346
 
}
1347
 
 
1348
 
/********************************************************************//**
1349
 
Drops adaptive hash index for a buffer pool instance. */
1350
 
static
1351
 
void
1352
 
buf_pool_drop_hash_index_instance(
1353
 
/*==============================*/
1354
 
        buf_pool_t*     buf_pool,               /*!< in: buffer pool instance */
1355
 
        ibool*          released_search_latch)  /*!< out: flag for signalling
1356
 
                                                whether the search latch was
1357
 
                                                released */
1358
 
{
1359
 
        buf_chunk_t*    chunks  = buf_pool->chunks;
1360
 
        buf_chunk_t*    chunk   = chunks + buf_pool->n_chunks;
1361
 
 
1362
 
        while (--chunk >= chunks) {
1363
 
                ulint           i;
1364
 
                buf_block_t*    block   = chunk->blocks;
1365
 
 
1366
 
                for (i = chunk->size; i--; block++) {
1367
 
                        /* block->is_hashed cannot be modified
1368
 
                        when we have an x-latch on btr_search_latch;
1369
 
                        see the comment in buf0buf.h */
1370
 
                        
1371
 
                        if (!block->is_hashed) {
1372
 
                                continue;
1373
 
                        }
1374
 
                        
1375
 
                        /* To follow the latching order, we
1376
 
                        have to release btr_search_latch
1377
 
                        before acquiring block->latch. */
1378
 
                        rw_lock_x_unlock(&btr_search_latch);
1379
 
                        /* When we release the search latch,
1380
 
                        we must rescan all blocks, because
1381
 
                        some may become hashed again. */
1382
 
                        *released_search_latch = TRUE;
1383
 
                        
1384
 
                        rw_lock_x_lock(&block->lock);
1385
 
                        
1386
 
                        /* This should be guaranteed by the
1387
 
                        callers, which will be holding
1388
 
                        btr_search_enabled_mutex. */
1389
 
                        ut_ad(!btr_search_enabled);
1390
 
                        
1391
 
                        /* Because we did not buffer-fix the
1392
 
                        block by calling buf_block_get_gen(),
1393
 
                        it is possible that the block has been
1394
 
                        allocated for some other use after
1395
 
                        btr_search_latch was released above.
1396
 
                        We do not care which file page the
1397
 
                        block is mapped to.  All we want to do
1398
 
                        is to drop any hash entries referring
1399
 
                        to the page. */
1400
 
                        
1401
 
                        /* It is possible that
1402
 
                        block->page.state != BUF_FILE_PAGE.
1403
 
                        Even that does not matter, because
1404
 
                        btr_search_drop_page_hash_index() will
1405
 
                        check block->is_hashed before doing
1406
 
                        anything.  block->is_hashed can only
1407
 
                        be set on uncompressed file pages. */
1408
 
                        
1409
 
                        btr_search_drop_page_hash_index(block);
1410
 
                        
1411
 
                        rw_lock_x_unlock(&block->lock);
1412
 
                        
1413
 
                        rw_lock_x_lock(&btr_search_latch);
1414
 
                        
1415
 
                        ut_ad(!btr_search_enabled);
1416
 
                }
1417
 
        }
1418
 
}
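
The release-retake dance in that loop is a general latching-order
pattern.  A minimal pthread sketch (hypothetical latches; the function
assumes the caller entered holding latch_a, mirroring btr_search_latch
above):

#include <pthread.h>

static pthread_rwlock_t latch_a = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t latch_b = PTHREAD_RWLOCK_INITIALIZER;

/* Returns nonzero to tell the caller to rescan: state protected by
   latch_a may have changed while it was released. */
static int step_with_lower_latch(void)
{
        pthread_rwlock_unlock(&latch_a);        /* obey the latch order */

        pthread_rwlock_wrlock(&latch_b);
        /* ... work that requires latch_b ... */
        pthread_rwlock_unlock(&latch_b);

        pthread_rwlock_wrlock(&latch_a);        /* re-acquire */
        return(1);
}
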
1419
 
 
1420
 
/********************************************************************//**
1421
1033
Drops the adaptive hash index.  To prevent a livelock, this function
1422
1034
is only to be called while holding btr_search_latch and while
1423
1035
btr_search_enabled == FALSE. */
1434
1046
        ut_ad(!btr_search_enabled);
1435
1047
 
1436
1048
        do {
1437
 
                ulint   i;
 
1049
                buf_chunk_t*    chunks  = buf_pool->chunks;
 
1050
                buf_chunk_t*    chunk   = chunks + buf_pool->n_chunks;
1438
1051
 
1439
1052
                released_search_latch = FALSE;
1440
1053
 
1441
 
                for (i = 0; i < srv_buf_pool_instances; i++) {
1442
 
                        buf_pool_t*     buf_pool;
1443
 
 
1444
 
                        buf_pool = buf_pool_from_array(i);
1445
 
 
1446
 
                        buf_pool_drop_hash_index_instance(
1447
 
                                buf_pool, &released_search_latch);
 
1054
                while (--chunk >= chunks) {
 
1055
                        buf_block_t*    block   = chunk->blocks;
 
1056
                        ulint           i       = chunk->size;
 
1057
 
 
1058
                        for (; i--; block++) {
 
1059
                                /* block->is_hashed cannot be modified
 
1060
                                when we have an x-latch on btr_search_latch;
 
1061
                                see the comment in buf0buf.h */
 
1062
 
 
1063
                                if (buf_block_get_state(block)
 
1064
                                    != BUF_BLOCK_FILE_PAGE
 
1065
                                    || !block->is_hashed) {
 
1066
                                        continue;
 
1067
                                }
 
1068
 
 
1069
                                /* To follow the latching order, we
 
1070
                                have to release btr_search_latch
 
1071
                                before acquiring block->latch. */
 
1072
                                rw_lock_x_unlock(&btr_search_latch);
 
1073
                                /* When we release the search latch,
 
1074
                                we must rescan all blocks, because
 
1075
                                some may become hashed again. */
 
1076
                                released_search_latch = TRUE;
 
1077
 
 
1078
                                rw_lock_x_lock(&block->lock);
 
1079
 
 
1080
                                /* This should be guaranteed by the
 
1081
                                callers, which will be holding
 
1082
                                btr_search_enabled_mutex. */
 
1083
                                ut_ad(!btr_search_enabled);
 
1084
 
 
1085
                                /* Because we did not buffer-fix the
 
1086
                                block by calling buf_block_get_gen(),
 
1087
                                it is possible that the block has been
 
1088
                                allocated for some other use after
 
1089
                                btr_search_latch was released above.
 
1090
                                We do not care which file page the
 
1091
                                block is mapped to.  All we want to do
 
1092
                                is to drop any hash entries referring
 
1093
                                to the page. */
 
1094
 
 
1095
                                /* It is possible that
 
1096
                                block->page.state != BUF_FILE_PAGE.
 
1097
                                Even that does not matter, because
 
1098
                                btr_search_drop_page_hash_index() will
 
1099
                                check block->is_hashed before doing
 
1100
                                anything.  block->is_hashed can only
 
1101
                                be set on uncompressed file pages. */
 
1102
 
 
1103
                                btr_search_drop_page_hash_index(block);
 
1104
 
 
1105
                                rw_lock_x_unlock(&block->lock);
 
1106
 
 
1107
                                rw_lock_x_lock(&btr_search_latch);
 
1108
 
 
1109
                                ut_ad(!btr_search_enabled);
 
1110
                        }
1448
1111
                }
1449
 
 
1450
1112
        } while (released_search_latch);
1451
1113
}
1452
1114
 
1465
1127
{
1466
1128
        buf_page_t*     b;
1467
1129
        ulint           fold;
1468
 
        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
1469
1130
 
1470
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
1131
        ut_ad(buf_pool_mutex_own());
1471
1132
        ut_ad(mutex_own(buf_page_get_mutex(bpage)));
1472
1133
        ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
1473
1134
        ut_a(bpage->buf_fix_count == 0);
1474
1135
        ut_ad(bpage->in_LRU_list);
1475
1136
        ut_ad(!bpage->in_zip_hash);
1476
1137
        ut_ad(bpage->in_page_hash);
1477
 
        ut_ad(bpage == buf_page_hash_get(buf_pool,
1478
 
                                         bpage->space, bpage->offset));
1479
 
        ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
 
1138
        ut_ad(bpage == buf_page_hash_get(bpage->space, bpage->offset));
1480
1139
#ifdef UNIV_DEBUG
1481
1140
        switch (buf_page_get_state(bpage)) {
1482
1141
        case BUF_BLOCK_ZIP_FREE:
1535
1194
}
1536
1195
 
1537
1196
/********************************************************************//**
1538
 
Shrinks a buffer pool instance. */
 
1197
Shrinks the buffer pool. */
1539
1198
static
1540
1199
void
1541
 
buf_pool_shrink_instance(
1542
 
/*=====================*/
1543
 
        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
1544
 
        ulint           chunk_size)     /*!< in: number of pages to remove */
 
1200
buf_pool_shrink(
 
1201
/*============*/
 
1202
        ulint   chunk_size)     /*!< in: number of pages to remove */
1545
1203
{
1546
1204
        buf_chunk_t*    chunks;
1547
1205
        buf_chunk_t*    chunk;
1550
1208
        buf_chunk_t*    max_chunk;
1551
1209
        buf_chunk_t*    max_free_chunk;
1552
1210
 
1553
 
        ut_ad(!buf_pool_mutex_own(buf_pool));
 
1211
        ut_ad(!buf_pool_mutex_own());
1554
1212
 
1555
1213
try_again:
1556
1214
        btr_search_disable(); /* Empty the adaptive hash index again */
1557
 
        buf_pool_mutex_enter(buf_pool);
 
1215
        buf_pool_mutex_enter();
1558
1216
 
1559
1217
shrink_again:
1560
1218
        if (buf_pool->n_chunks <= 1) {
1617
1275
 
1618
1276
                        mutex_enter(&block->mutex);
1619
1277
                        /* The following calls will temporarily
1620
 
                        release block->mutex and buf_pool->mutex.
 
1278
                        release block->mutex and buf_pool_mutex.
1621
1279
                        Therefore, we have to always retry,
1622
1280
                        even if !dirty && !nonfree. */
1623
1281
 
1633
1291
                        mutex_exit(&block->mutex);
1634
1292
                }
1635
1293
 
1636
 
                buf_pool_mutex_exit(buf_pool);
 
1294
                buf_pool_mutex_exit();
1637
1295
 
1638
1296
                /* Request for a flush of the chunk if it helps.
1639
1297
                Do not flush if there are non-free blocks, since
1642
1300
                        /* Avoid busy-waiting. */
1643
1301
                        os_thread_sleep(100000);
1644
1302
                } else if (dirty
1645
 
                           && buf_flush_LRU(buf_pool, dirty)
1646
 
                              == ULINT_UNDEFINED) {
 
1303
                           && buf_flush_batch(BUF_FLUSH_LRU, dirty, 0)
 
1304
                           == ULINT_UNDEFINED) {
1647
1305
 
1648
 
                        buf_flush_wait_batch_end(buf_pool, BUF_FLUSH_LRU);
 
1306
                        buf_flush_wait_batch_end(BUF_FLUSH_LRU);
1649
1307
                }
1650
1308
 
1651
1309
                goto try_again;
1654
1312
        max_size = max_free_size;
1655
1313
        max_chunk = max_free_chunk;
1656
1314
 
1657
 
        buf_pool->old_pool_size = buf_pool->curr_pool_size;
 
1315
        srv_buf_pool_old_size = srv_buf_pool_size;
1658
1316
 
1659
1317
        /* Rewrite buf_pool->chunks.  Copy everything but max_chunk. */
1660
1318
        chunks = mem_alloc((buf_pool->n_chunks - 1) * sizeof *chunks);
1666
1324
               - (max_chunk + 1));
1667
1325
        ut_a(buf_pool->curr_size > max_chunk->size);
1668
1326
        buf_pool->curr_size -= max_chunk->size;
1669
 
        buf_pool->curr_pool_size = buf_pool->curr_size * UNIV_PAGE_SIZE;
 
1327
        srv_buf_pool_curr_size = buf_pool->curr_size * UNIV_PAGE_SIZE;
1670
1328
        chunk_size -= max_chunk->size;
1671
 
        buf_chunk_free(buf_pool, max_chunk);
 
1329
        buf_chunk_free(max_chunk);
1672
1330
        mem_free(buf_pool->chunks);
1673
1331
        buf_pool->chunks = chunks;
1674
1332
        buf_pool->n_chunks--;
1678
1336
 
1679
1337
                goto shrink_again;
1680
1338
        }
1681
 
        goto func_exit;
1682
1339
 
1683
1340
func_done:
1684
 
        buf_pool->old_pool_size = buf_pool->curr_pool_size;
 
1341
        srv_buf_pool_old_size = srv_buf_pool_size;
1685
1342
func_exit:
1686
 
        buf_pool_mutex_exit(buf_pool);
 
1343
        buf_pool_mutex_exit();
1687
1344
        btr_search_enable();
1688
1345
}
1689
1346
 
1690
1347
/********************************************************************//**
1691
 
Shrinks the buffer pool. */
1692
 
static
1693
 
void
1694
 
buf_pool_shrink(
1695
 
/*============*/
1696
 
        ulint   chunk_size)     /*!< in: number of pages to remove */
1697
 
{
1698
 
        ulint   i;
1699
 
 
1700
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
1701
 
                buf_pool_t*     buf_pool;
1702
 
                ulint           instance_chunk_size;
1703
 
 
1704
 
                instance_chunk_size = chunk_size / srv_buf_pool_instances;
1705
 
                buf_pool = buf_pool_from_array(i);
1706
 
                buf_pool_shrink_instance(buf_pool, instance_chunk_size);
1707
 
        }
1708
 
 
1709
 
        buf_pool_set_sizes();
1710
 
}
1711
 
 
1712
 
/********************************************************************//**
1713
 
Rebuild buf_pool->page_hash for a buffer pool instance. */
1714
 
static
1715
 
void
1716
 
buf_pool_page_hash_rebuild_instance(
1717
 
/*================================*/
1718
 
        buf_pool_t*     buf_pool)               /*!< in: buffer pool instance */
 
1348
Rebuild buf_pool->page_hash. */
 
1349
static
 
1350
void
 
1351
buf_pool_page_hash_rebuild(void)
 
1352
/*============================*/
1719
1353
{
1720
1354
        ulint           i;
1721
 
        buf_page_t*     b;
1722
 
        buf_chunk_t*    chunk;
1723
1355
        ulint           n_chunks;
 
1356
        buf_chunk_t*    chunk;
 
1357
        hash_table_t*   page_hash;
1724
1358
        hash_table_t*   zip_hash;
1725
 
        hash_table_t*   page_hash;
 
1359
        buf_page_t*     b;
1726
1360
 
1727
 
        buf_pool_mutex_enter(buf_pool);
 
1361
        buf_pool_mutex_enter();
1728
1362
 
1729
1363
        /* Free, create, and populate the hash table. */
1730
1364
        hash_table_free(buf_pool->page_hash);
1777
1411
                            buf_page_address_fold(b->space, b->offset), b);
1778
1412
        }
1779
1413
 
1780
 
        buf_flush_list_mutex_enter(buf_pool);
1781
1414
        for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
1782
1415
             b = UT_LIST_GET_NEXT(list, b)) {
1783
1416
                ut_ad(b->in_flush_list);
1805
1438
                }
1806
1439
        }
1807
1440
 
1808
 
        buf_flush_list_mutex_exit(buf_pool);
1809
 
        buf_pool_mutex_exit(buf_pool);
1810
 
}
1811
 
 
1812
 
/********************************************************************
1813
 
Determine if a block is a sentinel for a buffer pool watch.
1814
 
@return TRUE if a sentinel for a buffer pool watch, FALSE if not */
1815
 
UNIV_INTERN
1816
 
ibool
1817
 
buf_pool_watch_is_sentinel(
1818
 
/*=======================*/
1819
 
        buf_pool_t*             buf_pool,       /*!< buffer pool instance */
1820
 
        const buf_page_t*       bpage)          /*!< in: block */
1821
 
{
1822
 
        ut_ad(buf_page_in_file(bpage));
1823
 
 
1824
 
        if (bpage < &buf_pool->watch[0]
1825
 
            || bpage >= &buf_pool->watch[BUF_POOL_WATCH_SIZE]) {
1826
 
 
1827
 
                ut_ad(buf_page_get_state(bpage) != BUF_BLOCK_ZIP_PAGE
1828
 
                      || bpage->zip.data != NULL);
1829
 
 
1830
 
                return(FALSE);
1831
 
        }
1832
 
 
1833
 
        ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE);
1834
 
        ut_ad(!bpage->in_zip_hash);
1835
 
        ut_ad(bpage->in_page_hash);
1836
 
        ut_ad(bpage->zip.data == NULL);
1837
 
        ut_ad(bpage->buf_fix_count > 0);
1838
 
        return(TRUE);
1839
 
}
1840
 
 
1841
 
/****************************************************************//**
1842
 
Add watch for the given page to be read in. Caller must have the buffer pool
1843
 
mutex reserved.
1844
 
@return NULL if watch set, block if the page is in the buffer pool */
1845
 
UNIV_INTERN
1846
 
buf_page_t*
1847
 
buf_pool_watch_set(
1848
 
/*===============*/
1849
 
        ulint   space,  /*!< in: space id */
1850
 
        ulint   offset, /*!< in: page number */
1851
 
        ulint   fold)   /*!< in: buf_page_address_fold(space, offset) */
1852
 
{
1853
 
        buf_page_t*     bpage;
1854
 
        ulint           i;
1855
 
        buf_pool_t*     buf_pool = buf_pool_get(space, offset);
1856
 
 
1857
 
        ut_ad(buf_pool_mutex_own(buf_pool));
1858
 
 
1859
 
        bpage = buf_page_hash_get_low(buf_pool, space, offset, fold);
1860
 
 
1861
 
        if (UNIV_LIKELY_NULL(bpage)) {
1862
 
                if (!buf_pool_watch_is_sentinel(buf_pool, bpage)) {
1863
 
                        /* The page was loaded meanwhile. */
1864
 
                        return(bpage);
1865
 
                }
1866
 
                /* Add to an existing watch. */
1867
 
                bpage->buf_fix_count++;
1868
 
                return(NULL);
1869
 
        }
1870
 
 
1871
 
        for (i = 0; i < BUF_POOL_WATCH_SIZE; i++) {
1872
 
                bpage = &buf_pool->watch[i];
1873
 
 
1874
 
                ut_ad(bpage->access_time == 0);
1875
 
                ut_ad(bpage->newest_modification == 0);
1876
 
                ut_ad(bpage->oldest_modification == 0);
1877
 
                ut_ad(bpage->zip.data == NULL);
1878
 
                ut_ad(!bpage->in_zip_hash);
1879
 
 
1880
 
                switch (bpage->state) {
1881
 
                case BUF_BLOCK_POOL_WATCH:
1882
 
                        ut_ad(!bpage->in_page_hash);
1883
 
                        ut_ad(bpage->buf_fix_count == 0);
1884
 
 
1885
 
                        /* bpage is pointing to buf_pool_watch[],
1886
 
                        which is protected by buf_pool_mutex.
1887
 
                        Normally, buf_page_t objects are protected by
1888
 
                        buf_block_t::mutex or buf_pool->zip_mutex or both. */
1889
 
 
1890
 
                        bpage->state = BUF_BLOCK_ZIP_PAGE;
1891
 
                        bpage->space = space;
1892
 
                        bpage->offset = offset;
1893
 
                        bpage->buf_fix_count = 1;
1894
 
 
1895
 
                        ut_d(bpage->in_page_hash = TRUE);
1896
 
                        HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
1897
 
                                    fold, bpage);
1898
 
                        return(NULL);
1899
 
                case BUF_BLOCK_ZIP_PAGE:
1900
 
                        ut_ad(bpage->in_page_hash);
1901
 
                        ut_ad(bpage->buf_fix_count > 0);
1902
 
                        break;
1903
 
                default:
1904
 
                        ut_error;
1905
 
                }
1906
 
        }
1907
 
 
1908
 
        /* Allocation failed.  Either the maximum number of purge
1909
 
        threads should never exceed BUF_POOL_WATCH_SIZE, or this code
1910
 
        should be modified to return a special non-NULL value and the
1911
 
        caller should purge the record directly. */
1912
 
        ut_error;
1913
 
 
1914
 
        /* Fix compiler warning */
1915
 
        return(NULL);
1916
 
}
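
Taken together with buf_pool_watch_unset() and buf_pool_watch_occurred()
below, the intended calling pattern looks roughly like this (hypothetical
caller; the buffer pool mutex handling around buf_pool_watch_set() is
elided here, though the comment above requires the caller to hold it):

static ibool watch_example(ulint space, ulint offset, ulint fold)
{
        ibool   arrived = FALSE;

        if (buf_pool_watch_set(space, offset, fold) == NULL) {
                /* watch armed: do work that must detect a
                   concurrent read of the page */
                arrived = buf_pool_watch_occurred(space, offset);
                buf_pool_watch_unset(space, offset);
        }

        return(arrived);
}
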
1917
 
 
1918
 
/********************************************************************//**
1919
 
Rebuild buf_pool->page_hash. */
1920
 
static
1921
 
void
1922
 
buf_pool_page_hash_rebuild(void)
1923
 
/*============================*/
1924
 
{
1925
 
        ulint   i;
1926
 
 
1927
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
1928
 
                buf_pool_page_hash_rebuild_instance(buf_pool_from_array(i));
1929
 
        }
1930
 
}
1931
 
 
1932
 
/********************************************************************//**
1933
 
Increase the buffer pool size of one buffer pool instance. */
1934
 
static
1935
 
void
1936
 
buf_pool_increase_instance(
1937
 
/*=======================*/
1938
 
        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
1939
 
        ulint           change_size)    /*!< in: new size of the pool */
1940
 
{
1941
 
        buf_chunk_t*    chunks;
1942
 
        buf_chunk_t*    chunk;
1943
 
 
1944
 
        buf_pool_mutex_enter(buf_pool);
1945
 
        chunks = mem_alloc((buf_pool->n_chunks + 1) * sizeof *chunks);
1946
 
 
1947
 
        memcpy(chunks, buf_pool->chunks, buf_pool->n_chunks * sizeof *chunks);
1948
 
 
1949
 
        chunk = &chunks[buf_pool->n_chunks];
1950
 
 
1951
 
        if (!buf_chunk_init(buf_pool, chunk, change_size)) {
1952
 
                mem_free(chunks);
1953
 
        } else {
1954
 
                buf_pool->old_pool_size = buf_pool->curr_pool_size;
1955
 
                buf_pool->curr_size += chunk->size;
1956
 
                buf_pool->curr_pool_size = buf_pool->curr_size * UNIV_PAGE_SIZE;
1957
 
                mem_free(buf_pool->chunks);
1958
 
                buf_pool->chunks = chunks;
1959
 
                buf_pool->n_chunks++;
1960
 
        }
1961
 
 
1962
 
        buf_pool_mutex_exit(buf_pool);
1963
 
}
1964
 
 
1965
 
/********************************************************************//**
1966
 
Increase the buffer pool size. */
1967
 
static
1968
 
void
1969
 
buf_pool_increase(
1970
 
/*==============*/
1971
 
        ulint   change_size)
1972
 
{
1973
 
        ulint   i;
1974
 
 
1975
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
1976
 
                buf_pool_increase_instance(
1977
 
                        buf_pool_from_array(i),
1978
 
                        change_size / srv_buf_pool_instances);
1979
 
        }
1980
 
 
1981
 
        buf_pool_set_sizes();
 
1441
        buf_pool_mutex_exit();
1982
1442
}
1983
1443
 
1984
1444
/********************************************************************//**
1988
1448
buf_pool_resize(void)
1989
1449
/*=================*/
1990
1450
{
1991
 
        ulint   change_size;
1992
 
        ulint   min_change_size = 1048576 * srv_buf_pool_instances;
1993
 
 
1994
 
        buf_pool_mutex_enter_all();
1995
 
  
1996
 
        if (srv_buf_pool_old_size == srv_buf_pool_size) {
1997
 
  
1998
 
                buf_pool_mutex_exit_all();
1999
 
 
2000
 
                return;
2001
 
 
2002
 
        } else if (srv_buf_pool_curr_size + min_change_size
2003
 
                   > srv_buf_pool_size) {
2004
 
  
2005
 
                change_size = (srv_buf_pool_curr_size - srv_buf_pool_size)
2006
 
                            / UNIV_PAGE_SIZE;
2007
 
 
2008
 
                buf_pool_mutex_exit_all();
2009
 
  
2010
 
                /* Disable adaptive hash indexes and empty the index
2011
 
                in order to free up memory in the buffer pool chunks. */
2012
 
                buf_pool_shrink(change_size);
2013
 
 
2014
 
        } else if (srv_buf_pool_curr_size + min_change_size
2015
 
                   < srv_buf_pool_size) {
2016
 
 
2017
 
                /* Enlarge the buffer pool by at least one megabyte */
2018
 
  
2019
 
                change_size = srv_buf_pool_size - srv_buf_pool_curr_size;
2020
 
 
2021
 
                buf_pool_mutex_exit_all();
2022
 
 
2023
 
                buf_pool_increase(change_size);
2024
 
        } else {
2025
 
                srv_buf_pool_size = srv_buf_pool_old_size;
2026
 
 
2027
 
                buf_pool_mutex_exit_all();
2028
 
 
 
1451
        buf_pool_mutex_enter();
 
1452
 
 
1453
        if (srv_buf_pool_old_size == srv_buf_pool_size) {
 
1454
 
 
1455
                buf_pool_mutex_exit();
2029
1456
                return;
2030
1457
        }
2031
 
  
2032
 
        buf_pool_page_hash_rebuild();
2033
 
}
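
The three-way size comparison above, condensed into a standalone sketch
(shrink_pools, grow_pools and page_size are hypothetical; the real
function also juggles the pool mutexes and rebuilds the page hash
afterwards):

void shrink_pools(unsigned long n_pages);       /* hypothetical */
void grow_pools(unsigned long n_bytes);         /* hypothetical */

static void resize_decision(unsigned long curr, unsigned long target,
                            unsigned long min_change,
                            unsigned long page_size)
{
        if (curr == target) {
                return;                          /* nothing to do */
        } else if (curr + min_change > target) {
                /* shrink toward the smaller target */
                shrink_pools((curr - target) / page_size);
        } else if (curr + min_change < target) {
                /* enlarge by at least min_change bytes */
                grow_pools(target - curr);
        } else {
                /* change smaller than the minimum: revert the request,
                   as srv_buf_pool_size = srv_buf_pool_old_size above */
        }
}
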
2034
 
 
2035
 
/****************************************************************//**
2036
 
Remove the sentinel block for the watch before replacing it with a real block.
2037
 
buf_page_watch_clear() or buf_page_watch_occurred() will notice that
2038
 
the block has been replaced with the real block.
2039
 
@return reference count, to be added to the replacement block */
2040
 
static
2041
 
void
2042
 
buf_pool_watch_remove(
2043
 
/*==================*/
2044
 
        buf_pool_t*     buf_pool,       /*!< buffer pool instance */
2045
 
        ulint           fold,           /*!< in: buf_page_address_fold(
2046
 
                                        space, offset) */
2047
 
        buf_page_t*     watch)          /*!< in/out: sentinel for watch */
2048
 
{
2049
 
        ut_ad(buf_pool_mutex_own(buf_pool));
2050
 
 
2051
 
        HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, watch);
2052
 
        ut_d(watch->in_page_hash = FALSE);
2053
 
        watch->buf_fix_count = 0;
2054
 
        watch->state = BUF_BLOCK_POOL_WATCH;
2055
 
}
2056
 
 
2057
 
/****************************************************************//**
2058
 
Stop watching if the page has been read in.
2059
 
buf_pool_watch_set(space,offset) must have returned NULL before. */
2060
 
UNIV_INTERN
2061
 
void
2062
 
buf_pool_watch_unset(
2063
 
/*=================*/
2064
 
        ulint   space,  /*!< in: space id */
2065
 
        ulint   offset) /*!< in: page number */
2066
 
{
2067
 
        buf_page_t*     bpage;
2068
 
        buf_pool_t*     buf_pool = buf_pool_get(space, offset);
2069
 
        ulint           fold = buf_page_address_fold(space, offset);
2070
 
 
2071
 
        buf_pool_mutex_enter(buf_pool);
2072
 
        bpage = buf_page_hash_get_low(buf_pool, space, offset, fold);
2073
 
        /* The page must exist because buf_pool_watch_set()
2074
 
        increments buf_fix_count. */
2075
 
        ut_a(bpage);
2076
 
 
2077
 
        if (UNIV_UNLIKELY(!buf_pool_watch_is_sentinel(buf_pool, bpage))) {
2078
 
                mutex_t* mutex = buf_page_get_mutex(bpage);
2079
 
 
2080
 
                mutex_enter(mutex);
2081
 
                ut_a(bpage->buf_fix_count > 0);
2082
 
                bpage->buf_fix_count--;
2083
 
                mutex_exit(mutex);
2084
 
        } else {
2085
 
                ut_a(bpage->buf_fix_count > 0);
2086
 
 
2087
 
                if (UNIV_LIKELY(!--bpage->buf_fix_count)) {
2088
 
                        buf_pool_watch_remove(buf_pool, fold, bpage);
 
1458
 
 
1459
        if (srv_buf_pool_curr_size + 1048576 > srv_buf_pool_size) {
 
1460
 
 
1461
                buf_pool_mutex_exit();
 
1462
 
 
1463
                /* Disable adaptive hash indexes and empty the index
 
1464
                in order to free up memory in the buffer pool chunks. */
 
1465
                buf_pool_shrink((srv_buf_pool_curr_size - srv_buf_pool_size)
 
1466
                                / UNIV_PAGE_SIZE);
 
1467
        } else if (srv_buf_pool_curr_size + 1048576 < srv_buf_pool_size) {
 
1468
 
 
1469
                /* Enlarge the buffer pool by at least one megabyte */
 
1470
 
 
1471
                ulint           mem_size
 
1472
                        = srv_buf_pool_size - srv_buf_pool_curr_size;
 
1473
                buf_chunk_t*    chunks;
 
1474
                buf_chunk_t*    chunk;
 
1475
 
 
1476
                chunks = mem_alloc((buf_pool->n_chunks + 1) * sizeof *chunks);
 
1477
 
 
1478
                memcpy(chunks, buf_pool->chunks, buf_pool->n_chunks
 
1479
                       * sizeof *chunks);
 
1480
 
 
1481
                chunk = &chunks[buf_pool->n_chunks];
 
1482
 
 
1483
                if (!buf_chunk_init(chunk, mem_size)) {
 
1484
                        mem_free(chunks);
 
1485
                } else {
 
1486
                        buf_pool->curr_size += chunk->size;
 
1487
                        srv_buf_pool_curr_size = buf_pool->curr_size
 
1488
                                * UNIV_PAGE_SIZE;
 
1489
                        mem_free(buf_pool->chunks);
 
1490
                        buf_pool->chunks = chunks;
 
1491
                        buf_pool->n_chunks++;
2089
1492
                }
 
1493
 
 
1494
                srv_buf_pool_old_size = srv_buf_pool_size;
 
1495
                buf_pool_mutex_exit();
2090
1496
        }
2091
1497
 
2092
 
        buf_pool_mutex_exit(buf_pool);
2093
 
}
2094
 
 
2095
 
/****************************************************************//**
2096
 
Check if the page has been read in.
2097
 
This may only be called after buf_pool_watch_set(space,offset)
2098
 
has returned NULL and before invoking buf_pool_watch_unset(space,offset).
2099
 
@return FALSE if the given page was not read in, TRUE if it was */
2100
 
UNIV_INTERN
2101
 
ibool
2102
 
buf_pool_watch_occurred(
2103
 
/*====================*/
2104
 
        ulint   space,  /*!< in: space id */
2105
 
        ulint   offset) /*!< in: page number */
2106
 
{
2107
 
        ibool           ret;
2108
 
        buf_page_t*     bpage;
2109
 
        buf_pool_t*     buf_pool = buf_pool_get(space, offset);
2110
 
        ulint           fold    = buf_page_address_fold(space, offset);
2111
 
 
2112
 
        buf_pool_mutex_enter(buf_pool);
2113
 
 
2114
 
        bpage = buf_page_hash_get_low(buf_pool, space, offset, fold);
2115
 
        /* The page must exist because buf_pool_watch_set()
2116
 
        increments buf_fix_count. */
2117
 
        ut_a(bpage);
2118
 
        ret = !buf_pool_watch_is_sentinel(buf_pool, bpage);
2119
 
        buf_pool_mutex_exit(buf_pool);
2120
 
 
2121
 
        return(ret);
 
1498
        buf_pool_page_hash_rebuild();
2122
1499
}
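
Taken together, buf_pool_watch_set(), buf_pool_watch_occurred() and buf_pool_watch_unset() form a small protocol: register a buffer-fixed sentinel, poll whether a real page has replaced it, then drop the reference. A single-threaded model of that life cycle, with illustrative names and none of the hashing or locking (a sketch of the state machine, not the InnoDB API):

#include <assert.h>
#include <stdio.h>

enum page_state { POOL_WATCH, ZIP_PAGE }; /* sentinel vs. real page */

struct watch_page {
        enum page_state state;
        unsigned        buf_fix_count;  /* references held on the watch */
};

/* Register a watch: the sentinel is buffer-fixed so that it cannot
disappear while the caller is interested in the page. */
static void watch_set(struct watch_page* w)
{
        w->state = POOL_WATCH;
        w->buf_fix_count++;
}

/* A read completion replaces the sentinel with a real block and
inherits its reference count, as buf_page_init() does below. */
static void watch_replace_with_real_page(struct watch_page* w)
{
        assert(w->buf_fix_count > 0);
        w->state = ZIP_PAGE;
}

/* TRUE once a real page has taken the sentinel's place. */
static int watch_occurred(const struct watch_page* w)
{
        return(w->state != POOL_WATCH);
}

/* Drop the caller's reference; the sentinel is reclaimed when the
last reference goes away and no real page took its place. */
static void watch_unset(struct watch_page* w)
{
        assert(w->buf_fix_count > 0);
        if (--w->buf_fix_count == 0 && w->state == POOL_WATCH) {
                /* buf_pool_watch_remove() would unhash it here. */
        }
}

int main(void)
{
        struct watch_page w = { POOL_WATCH, 0 };

        watch_set(&w);
        printf("occurred: %d\n", watch_occurred(&w)); /* 0 */
        watch_replace_with_real_page(&w);
        printf("occurred: %d\n", watch_occurred(&w)); /* 1 */
        watch_unset(&w);
        return(0);
}
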
2123
1500
 
2124
1501
/********************************************************************//**
2131
1508
/*================*/
2132
1509
        buf_page_t*     bpage)  /*!< in: buffer block of a file page */
2133
1510
{
2134
 
        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
2135
 
 
2136
 
        buf_pool_mutex_enter(buf_pool);
 
1511
        buf_pool_mutex_enter();
2137
1512
 
2138
1513
        ut_a(buf_page_in_file(bpage));
2139
1514
 
2140
1515
        buf_LRU_make_block_young(bpage);
2141
1516
 
2142
 
        buf_pool_mutex_exit(buf_pool);
 
1517
        buf_pool_mutex_exit();
2143
1518
}
2144
1519
 
2145
1520
/********************************************************************//**
2157
1532
                                        read under mutex protection,
2158
1533
                                        or 0 if unknown */
2159
1534
{
2160
 
        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
2161
 
 
2162
 
        ut_ad(!buf_pool_mutex_own(buf_pool));
 
1535
        ut_ad(!buf_pool_mutex_own());
2163
1536
        ut_a(buf_page_in_file(bpage));
2164
1537
 
2165
1538
        if (buf_page_peek_if_too_old(bpage)) {
2166
 
                buf_pool_mutex_enter(buf_pool);
 
1539
                buf_pool_mutex_enter();
2167
1540
                buf_LRU_make_block_young(bpage);
2168
 
                buf_pool_mutex_exit(buf_pool);
 
1541
                buf_pool_mutex_exit();
2169
1542
        } else if (!access_time) {
2170
1543
                ulint   time_ms = ut_time_ms();
2171
 
                buf_pool_mutex_enter(buf_pool);
 
1544
                buf_pool_mutex_enter();
2172
1545
                buf_page_set_accessed(bpage, time_ms);
2173
 
                buf_pool_mutex_exit(buf_pool);
 
1546
                buf_pool_mutex_exit();
2174
1547
        }
2175
1548
}
2176
1549
 
2185
1558
        ulint   offset) /*!< in: page number */
2186
1559
{
2187
1560
        buf_block_t*    block;
2188
 
        buf_pool_t*     buf_pool = buf_pool_get(space, offset);
2189
 
 
2190
 
        buf_pool_mutex_enter(buf_pool);
2191
 
 
2192
 
        block = (buf_block_t*) buf_page_hash_get(buf_pool, space, offset);
 
1561
 
 
1562
        buf_pool_mutex_enter();
 
1563
 
 
1564
        block = (buf_block_t*) buf_page_hash_get(space, offset);
2193
1565
 
2194
1566
        if (block && buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE) {
2195
 
                ut_ad(!buf_pool_watch_is_sentinel(buf_pool, &block->page));
2196
1567
                block->check_index_page_at_flush = FALSE;
2197
1568
        }
2198
1569
 
2199
 
        buf_pool_mutex_exit(buf_pool);
 
1570
        buf_pool_mutex_exit();
2200
1571
}
2201
1572
 
2202
1573
/********************************************************************//**
2213
1584
{
2214
1585
        buf_block_t*    block;
2215
1586
        ibool           is_hashed;
2216
 
        buf_pool_t*     buf_pool = buf_pool_get(space, offset);
2217
 
 
2218
 
        buf_pool_mutex_enter(buf_pool);
2219
 
 
2220
 
        block = (buf_block_t*) buf_page_hash_get(buf_pool, space, offset);
 
1587
 
 
1588
        buf_pool_mutex_enter();
 
1589
 
 
1590
        block = (buf_block_t*) buf_page_hash_get(space, offset);
2221
1591
 
2222
1592
        if (!block || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
2223
1593
                is_hashed = FALSE;
2224
1594
        } else {
2225
 
                ut_ad(!buf_pool_watch_is_sentinel(buf_pool, &block->page));
2226
1595
                is_hashed = block->is_hashed;
2227
1596
        }
2228
1597
 
2229
 
        buf_pool_mutex_exit(buf_pool);
 
1598
        buf_pool_mutex_exit();
2230
1599
 
2231
1600
        return(is_hashed);
2232
1601
}
2246
1615
        ulint   offset) /*!< in: page number */
2247
1616
{
2248
1617
        buf_page_t*     bpage;
2249
 
        buf_pool_t*     buf_pool = buf_pool_get(space, offset);
2250
 
 
2251
 
        buf_pool_mutex_enter(buf_pool);
2252
 
 
2253
 
        bpage = buf_page_hash_get(buf_pool, space, offset);
 
1618
 
 
1619
        buf_pool_mutex_enter();
 
1620
 
 
1621
        bpage = buf_page_hash_get(space, offset);
2254
1622
 
2255
1623
        if (bpage) {
2256
 
                ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
2257
1624
                bpage->file_page_was_freed = TRUE;
2258
1625
        }
2259
1626
 
2260
 
        buf_pool_mutex_exit(buf_pool);
 
1627
        buf_pool_mutex_exit();
2261
1628
 
2262
1629
        return(bpage);
2263
1630
}
2276
1643
        ulint   offset) /*!< in: page number */
2277
1644
{
2278
1645
        buf_page_t*     bpage;
2279
 
        buf_pool_t*     buf_pool = buf_pool_get(space, offset);
2280
 
 
2281
 
        buf_pool_mutex_enter(buf_pool);
2282
 
 
2283
 
        bpage = buf_page_hash_get(buf_pool, space, offset);
 
1646
 
 
1647
        buf_pool_mutex_enter();
 
1648
 
 
1649
        bpage = buf_page_hash_get(space, offset);
2284
1650
 
2285
1651
        if (bpage) {
2286
 
                ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
2287
1652
                bpage->file_page_was_freed = FALSE;
2288
1653
        }
2289
1654
 
2290
 
        buf_pool_mutex_exit(buf_pool);
 
1655
        buf_pool_mutex_exit();
2291
1656
 
2292
1657
        return(bpage);
2293
1658
}
2314
1679
        mutex_t*        block_mutex;
2315
1680
        ibool           must_read;
2316
1681
        unsigned        access_time;
2317
 
        buf_pool_t*     buf_pool = buf_pool_get(space, offset);
2318
1682
 
2319
1683
#ifndef UNIV_LOG_DEBUG
2320
1684
        ut_ad(!ibuf_inside());
2322
1686
        buf_pool->stat.n_page_gets++;
2323
1687
 
2324
1688
        for (;;) {
2325
 
                buf_pool_mutex_enter(buf_pool);
 
1689
                buf_pool_mutex_enter();
2326
1690
lookup:
2327
 
                bpage = buf_page_hash_get(buf_pool, space, offset);
 
1691
                bpage = buf_page_hash_get(space, offset);
2328
1692
                if (bpage) {
2329
 
                        ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
2330
1693
                        break;
2331
1694
                }
2332
1695
 
2333
1696
                /* Page not in buf_pool: needs to be read from file */
2334
1697
 
2335
 
                buf_pool_mutex_exit(buf_pool);
 
1698
                buf_pool_mutex_exit();
2336
1699
 
2337
1700
                buf_read_page(space, zip_size, offset);
2338
1701
 
2344
1707
        if (UNIV_UNLIKELY(!bpage->zip.data)) {
2345
1708
                /* There is no compressed page. */
2346
1709
err_exit:
2347
 
                buf_pool_mutex_exit(buf_pool);
 
1710
                buf_pool_mutex_exit();
2348
1711
                return(NULL);
2349
1712
        }
2350
1713
 
2351
 
        ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
2352
 
 
2353
1714
        switch (buf_page_get_state(bpage)) {
2354
1715
        case BUF_BLOCK_NOT_USED:
2355
1716
        case BUF_BLOCK_READY_FOR_USE:
2359
1720
                break;
2360
1721
        case BUF_BLOCK_ZIP_PAGE:
2361
1722
        case BUF_BLOCK_ZIP_DIRTY:
2362
 
                block_mutex = &buf_pool->zip_mutex;
 
1723
                block_mutex = &buf_pool_zip_mutex;
2363
1724
                mutex_enter(block_mutex);
2364
1725
                bpage->buf_fix_count++;
2365
1726
                goto got_block;
2387
1748
        must_read = buf_page_get_io_fix(bpage) == BUF_IO_READ;
2388
1749
        access_time = buf_page_is_accessed(bpage);
2389
1750
 
2390
 
        buf_pool_mutex_exit(buf_pool);
 
1751
        buf_pool_mutex_exit();
2391
1752
 
2392
1753
        mutex_exit(block_mutex);
2393
1754
 
2517
1878
 
2518
1879
#ifndef UNIV_HOTBACKUP
2519
1880
/*******************************************************************//**
2520
 
Gets the block whose frame the pointer points to, if found
2521
 
in this buffer pool instance.
2522
 
@return pointer to block */
2523
 
static
 
1881
Gets the block whose frame the pointer points to.
 
1882
@return pointer to block, never NULL */
 
1883
UNIV_INTERN
2524
1884
buf_block_t*
2525
 
buf_block_align_instance(
2526
 
/*=====================*/
2527
 
        buf_pool_t*     buf_pool,       /*!< in: buffer in which the block
2528
 
                                        resides */
2529
 
        const byte*     ptr)            /*!< in: pointer to a frame */
 
1885
buf_block_align(
 
1886
/*============*/
 
1887
        const byte*     ptr)    /*!< in: pointer to a frame */
2530
1888
{
2531
1889
        buf_chunk_t*    chunk;
2532
1890
        ulint           i;
2552
1910
                        ut_ad(block->frame == page_align(ptr));
2553
1911
#ifdef UNIV_DEBUG
2554
1912
                        /* A thread that updates these fields must
2555
 
                        hold buf_pool->mutex and block->mutex.  Acquire
 
1913
                        hold buf_pool_mutex and block->mutex.  Acquire
2556
1914
                        only the latter. */
2557
1915
                        mutex_enter(&block->mutex);
2558
1916
 
2601
1959
                }
2602
1960
        }
2603
1961
 
2604
 
        return(NULL);
2605
 
}
2606
 
 
2607
 
/*******************************************************************//**
2608
 
Gets the block whose frame the pointer points to.
2609
 
@return pointer to block, never NULL */
2610
 
UNIV_INTERN
2611
 
buf_block_t*
2612
 
buf_block_align(
2613
 
/*============*/
2614
 
        const byte*     ptr)    /*!< in: pointer to a frame */
2615
 
{
2616
 
        ulint           i;
2617
 
 
2618
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
2619
 
                buf_block_t*    block;
2620
 
 
2621
 
                block = buf_block_align_instance(
2622
 
                        buf_pool_from_array(i), ptr);
2623
 
                if (block) {
2624
 
                        return(block);
2625
 
                }
2626
 
        }
2627
 
 
2628
1962
        /* The block should always be found. */
2629
1963
        ut_error;
2630
1964
        return(NULL);
2631
1965
}
2632
1966
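
buf_block_align_instance() can map an arbitrary frame pointer back to its block descriptor because every frame starts on a UNIV_PAGE_SIZE boundary, so masking the low bits of the pointer recovers the frame start. A minimal demonstration of that mask, assuming a 16 KiB page size and using a local page_align() stand-in for the real macro:

#include <stdint.h>
#include <stdio.h>

#define UNIV_PAGE_SIZE 16384UL  /* page size assumed for this sketch */

/* Round a pointer down to the start of its page-sized frame. */
static void* page_align(const void* ptr)
{
        return((void*) ((uintptr_t) ptr
                        & ~(uintptr_t) (UNIV_PAGE_SIZE - 1)));
}

int main(void)
{
        static unsigned char buf[3 * UNIV_PAGE_SIZE];
        /* Align the start up to a page boundary, as the chunk
        initialization does for the frame area. */
        unsigned char* frame = (unsigned char*)
                (((uintptr_t) buf + UNIV_PAGE_SIZE - 1)
                 & ~(uintptr_t) (UNIV_PAGE_SIZE - 1));
        unsigned char* rec = frame + 100; /* a pointer into the frame */

        printf("recovered frame: %s\n",
               page_align(rec) == (void*) frame ? "yes" : "no");
        return(0);
}
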
 
2633
1967
/********************************************************************//**
2634
1968
Find out if a pointer belongs to a buf_block_t. It can be a pointer to
2635
 
the buf_block_t itself or a member of it. This function checks one of
2636
 
the buffer pool instances.
 
1969
the buf_block_t itself or a member of it
2637
1970
@return TRUE if ptr belongs to a buf_block_t struct */
2638
 
static
 
1971
UNIV_INTERN
2639
1972
ibool
2640
 
buf_pointer_is_block_field_instance(
2641
 
/*================================*/
2642
 
        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
2643
 
        const void*     ptr)            /*!< in: pointer not dereferenced */
 
1973
buf_pointer_is_block_field(
 
1974
/*=======================*/
 
1975
        const void*             ptr)    /*!< in: pointer not
 
1976
                                        dereferenced */
2644
1977
{
2645
1978
        const buf_chunk_t*              chunk   = buf_pool->chunks;
2646
1979
        const buf_chunk_t* const        echunk  = chunk + buf_pool->n_chunks;
2661
1994
}
2662
1995
 
2663
1996
/********************************************************************//**
2664
 
Find out if a pointer belongs to a buf_block_t. It can be a pointer to
2665
 
the buf_block_t itself or a member of it
2666
 
@return TRUE if ptr belongs to a buf_block_t struct */
2667
 
UNIV_INTERN
2668
 
ibool
2669
 
buf_pointer_is_block_field(
2670
 
/*=======================*/
2671
 
        const void*     ptr)    /*!< in: pointer not dereferenced */
2672
 
{
2673
 
        ulint   i;
2674
 
 
2675
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
2676
 
                ibool   found;
2677
 
 
2678
 
                found = buf_pointer_is_block_field_instance(
2679
 
                        buf_pool_from_array(i), ptr);
2680
 
                if (found) {
2681
 
                        return(TRUE);
2682
 
                }
2683
 
        }
2684
 
 
2685
 
        return(FALSE);
2686
 
}
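
The instance walker above reduces to one pointer range test per chunk: does ptr land inside that chunk's array of block descriptors? A compact model of the test (uintptr_t comparisons keep it well defined; struct block is a stand-in for buf_block_t):

#include <stdint.h>
#include <stdio.h>

struct block { int dummy[16]; };        /* stand-in for buf_block_t */

/* TRUE iff ptr points into blocks[0..n_blocks), i.e. at a
descriptor or at any member inside one. */
static int
pointer_is_block_field(const struct block* blocks,
                       unsigned long n_blocks, const void* ptr)
{
        uintptr_t p = (uintptr_t) ptr;

        return(p >= (uintptr_t) blocks
               && p < (uintptr_t) (blocks + n_blocks));
}

int main(void)
{
        static struct block chunk[8];
        int other;

        printf("%d\n", pointer_is_block_field(
                       chunk, 8, &chunk[3].dummy[5])); /* 1 */
        printf("%d\n", pointer_is_block_field(
                       chunk, 8, &other));             /* 0 */
        return(0);
}
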
2687
 
 
2688
 
/********************************************************************//**
2689
1997
Find out if a buffer block was created by buf_chunk_init().
2690
1998
@return TRUE if "block" has been added to buf_pool->free by buf_chunk_init() */
2691
1999
static
2692
2000
ibool
2693
2001
buf_block_is_uncompressed(
2694
2002
/*======================*/
2695
 
        buf_pool_t*             buf_pool,       /*!< in: buffer pool instance */
2696
 
        const buf_block_t*      block)          /*!< in: pointer to block,
2697
 
                                                not dereferenced */
 
2003
        const buf_block_t*      block)  /*!< in: pointer to block,
 
2004
                                        not dereferenced */
2698
2005
{
2699
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
2006
        ut_ad(buf_pool_mutex_own());
2700
2007
 
2701
2008
        if (UNIV_UNLIKELY((((ulint) block) % sizeof *block) != 0)) {
2702
2009
                /* The pointer should be aligned. */
2703
2010
                return(FALSE);
2704
2011
        }
2705
2012
 
2706
 
        return(buf_pointer_is_block_field_instance(buf_pool, (void *)block));
 
2013
        return(buf_pointer_is_block_field((void *)block));
2707
2014
}
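
The modulo test in buf_block_is_uncompressed() is a cheap pre-filter: real descriptors live at multiples of sizeof(buf_block_t) from the start of a chunk's blocks[] array, so a pointer with any other residue cannot be valid and is rejected before the more expensive chunk walk. The sketch below shows the residue idea relative to the array base (the code above uses the raw address, which additionally assumes a suitably aligned base):

#include <stdint.h>
#include <stdio.h>

struct block { char payload[128]; };    /* stand-in for buf_block_t */

int main(void)
{
        static struct block blocks[4];
        const struct block* good = &blocks[2];
        const char*         bad  = (const char*) &blocks[2] + 1;

        /* Descriptors sit at multiples of sizeof(struct block) from
        the array base; an offset pointer fails the modulo filter. */
        printf("good residue: %lu\n",
               (unsigned long) (((uintptr_t) good - (uintptr_t) blocks)
                                % sizeof *good));       /* 0 */
        printf("bad residue:  %lu\n",
               (unsigned long) (((uintptr_t) bad - (uintptr_t) blocks)
                                % sizeof *good));       /* 1 */
        return(0);
}
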
2708
2015
 
2709
2016
/********************************************************************//**
2720
2027
        ulint           rw_latch,/*!< in: RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH */
2721
2028
        buf_block_t*    guess,  /*!< in: guessed block or NULL */
2722
2029
        ulint           mode,   /*!< in: BUF_GET, BUF_GET_IF_IN_POOL,
2723
 
                                BUF_GET_NO_LATCH, or
2724
 
                                BUF_GET_IF_IN_POOL_OR_WATCH */
 
2030
                                BUF_GET_NO_LATCH */
2725
2031
        const char*     file,   /*!< in: file name */
2726
2032
        ulint           line,   /*!< in: line where called */
2727
2033
        mtr_t*          mtr)    /*!< in: mini-transaction */
2728
2034
{
2729
2035
        buf_block_t*    block;
2730
 
        ulint           fold;
2731
2036
        unsigned        access_time;
2732
2037
        ulint           fix_type;
2733
2038
        ibool           must_read;
2734
2039
        ulint           retries = 0;
2735
 
        buf_pool_t*     buf_pool = buf_pool_get(space, offset);
2736
2040
 
2737
2041
        ut_ad(mtr);
2738
2042
        ut_ad(mtr->state == MTR_ACTIVE);
2740
2044
              || (rw_latch == RW_X_LATCH)
2741
2045
              || (rw_latch == RW_NO_LATCH));
2742
2046
        ut_ad((mode != BUF_GET_NO_LATCH) || (rw_latch == RW_NO_LATCH));
2743
 
        ut_ad(mode == BUF_GET
2744
 
              || mode == BUF_GET_IF_IN_POOL
2745
 
              || mode == BUF_GET_NO_LATCH
2746
 
              || mode == BUF_GET_IF_IN_POOL_OR_WATCH);
 
2047
        ut_ad((mode == BUF_GET) || (mode == BUF_GET_IF_IN_POOL)
 
2048
              || (mode == BUF_GET_NO_LATCH));
2747
2049
        ut_ad(zip_size == fil_space_get_zip_size(space));
2748
2050
        ut_ad(ut_is_2pow(zip_size));
2749
2051
#ifndef UNIV_LOG_DEBUG
2750
2052
        ut_ad(!ibuf_inside() || ibuf_page(space, zip_size, offset, NULL));
2751
2053
#endif
2752
2054
        buf_pool->stat.n_page_gets++;
2753
 
        fold = buf_page_address_fold(space, offset);
2754
2055
loop:
2755
2056
        block = guess;
2756
 
        buf_pool_mutex_enter(buf_pool);
 
2057
        buf_pool_mutex_enter();
2757
2058
 
2758
2059
        if (block) {
2759
2060
                /* If the guess is a compressed page descriptor that
2764
2065
                the guess may be pointing to a buffer pool chunk that
2765
2066
                has been released when resizing the buffer pool. */
2766
2067
 
2767
 
                if (!buf_block_is_uncompressed(buf_pool, block)
 
2068
                if (!buf_block_is_uncompressed(block)
2768
2069
                    || offset != block->page.offset
2769
2070
                    || space != block->page.space
2770
2071
                    || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
2777
2078
        }
2778
2079
 
2779
2080
        if (block == NULL) {
2780
 
                block = (buf_block_t*) buf_page_hash_get_low(
2781
 
                        buf_pool, space, offset, fold);
 
2081
                block = (buf_block_t*) buf_page_hash_get(space, offset);
2782
2082
        }
2783
2083
 
2784
2084
loop2:
2785
 
        if (block && buf_pool_watch_is_sentinel(buf_pool, &block->page)) {
2786
 
                block = NULL;
2787
 
        }
2788
 
 
2789
2085
        if (block == NULL) {
2790
2086
                /* Page not in buf_pool: needs to be read from file */
2791
2087
 
2792
 
                if (mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
2793
 
                        block = (buf_block_t*) buf_pool_watch_set(
2794
 
                                space, offset, fold);
2795
 
 
2796
 
                        if (UNIV_LIKELY_NULL(block)) {
2797
 
 
2798
 
                                goto got_block;
2799
 
                        }
2800
 
                }
2801
 
 
2802
 
                buf_pool_mutex_exit(buf_pool);
2803
 
 
2804
 
                if (mode == BUF_GET_IF_IN_POOL
2805
 
                    || mode == BUF_GET_IF_IN_POOL_OR_WATCH) {
 
2088
                buf_pool_mutex_exit();
 
2089
 
 
2090
                if (mode == BUF_GET_IF_IN_POOL) {
2806
2091
 
2807
2092
                        return(NULL);
2808
2093
                }
2837
2122
                goto loop;
2838
2123
        }
2839
2124
 
2840
 
got_block:
2841
2125
        ut_ad(page_zip_get_size(&block->page.zip) == zip_size);
2842
2126
 
2843
2127
        must_read = buf_block_get_io_fix(block) == BUF_IO_READ;
2844
2128
 
2845
2129
        if (must_read && mode == BUF_GET_IF_IN_POOL) {
2846
 
 
2847
 
                /* The page is being read to buffer pool,
2848
 
                but we cannot wait around for the read to
2849
 
                complete. */
2850
 
                buf_pool_mutex_exit(buf_pool);
 
2130
                /* The page is only being read to buffer */
 
2131
                buf_pool_mutex_exit();
2851
2132
 
2852
2133
                return(NULL);
2853
2134
        }
2863
2144
        case BUF_BLOCK_ZIP_DIRTY:
2864
2145
                bpage = &block->page;
2865
2146
                /* Protect bpage->buf_fix_count. */
2866
 
                mutex_enter(&buf_pool->zip_mutex);
 
2147
                mutex_enter(&buf_pool_zip_mutex);
2867
2148
 
2868
2149
                if (bpage->buf_fix_count
2869
2150
                    || buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
2870
2151
                        /* This condition often occurs when the buffer
2871
2152
                        is not buffer-fixed, but I/O-fixed by
2872
2153
                        buf_page_init_for_read(). */
2873
 
                        mutex_exit(&buf_pool->zip_mutex);
 
2154
                        mutex_exit(&buf_pool_zip_mutex);
2874
2155
wait_until_unfixed:
2875
2156
                        /* The block is buffer-fixed or I/O-fixed.
2876
2157
                        Try again later. */
2877
 
                        buf_pool_mutex_exit(buf_pool);
 
2158
                        buf_pool_mutex_exit();
2878
2159
                        os_thread_sleep(WAIT_FOR_READ);
2879
 
  
 
2160
 
2880
2161
                        goto loop;
2881
2162
                }
2882
2163
 
2883
2164
                /* Allocate an uncompressed page. */
2884
 
                buf_pool_mutex_exit(buf_pool);
2885
 
                mutex_exit(&buf_pool->zip_mutex);
 
2165
                buf_pool_mutex_exit();
 
2166
                mutex_exit(&buf_pool_zip_mutex);
2886
2167
 
2887
 
                block = buf_LRU_get_free_block(buf_pool, 0);
 
2168
                block = buf_LRU_get_free_block(0);
2888
2169
                ut_a(block);
2889
2170
 
2890
 
                buf_pool_mutex_enter(buf_pool);
 
2171
                buf_pool_mutex_enter();
2891
2172
                mutex_enter(&block->mutex);
2892
2173
 
2893
2174
                {
2894
 
                        buf_page_t*     hash_bpage;
2895
 
 
2896
 
                        hash_bpage = buf_page_hash_get_low(
2897
 
                                buf_pool, space, offset, fold);
 
2175
                        buf_page_t*     hash_bpage
 
2176
                                = buf_page_hash_get(space, offset);
2898
2177
 
2899
2178
                        if (UNIV_UNLIKELY(bpage != hash_bpage)) {
2900
2179
                                /* The buf_pool->page_hash was modified
2901
 
                                while buf_pool->mutex was released.
 
2180
                                while buf_pool_mutex was released.
2902
2181
                                Free the block that was allocated. */
2903
2182
 
2904
2183
                                buf_LRU_block_free_non_file_page(block);
2914
2193
                     || buf_page_get_io_fix(bpage) != BUF_IO_NONE)) {
2915
2194
 
2916
2195
                        /* The block was buffer-fixed or I/O-fixed
2917
 
                        while buf_pool->mutex was not held by this thread.
 
2196
                        while buf_pool_mutex was not held by this thread.
2918
2197
                        Free the block that was allocated and try again.
2919
2198
                        This should be extremely unlikely. */
2920
2199
 
2927
2206
                /* Move the compressed page from bpage to block,
2928
2207
                and uncompress it. */
2929
2208
 
2930
 
                mutex_enter(&buf_pool->zip_mutex);
 
2209
                mutex_enter(&buf_pool_zip_mutex);
2931
2210
 
2932
2211
                buf_relocate(bpage, &block->page);
2933
2212
                buf_block_init_low(block);
2957
2236
 
2958
2237
                block->page.buf_fix_count = 1;
2959
2238
                buf_block_set_io_fix(block, BUF_IO_READ);
2960
 
                rw_lock_x_lock_func(&block->lock, 0, file, line);
 
2239
                rw_lock_x_lock(&block->lock);
2961
2240
 
2962
2241
                UNIV_MEM_INVALID(bpage, sizeof *bpage);
2963
2242
 
2964
2243
                mutex_exit(&block->mutex);
2965
 
                mutex_exit(&buf_pool->zip_mutex);
 
2244
                mutex_exit(&buf_pool_zip_mutex);
2966
2245
                buf_pool->n_pend_unzip++;
2967
2246
 
2968
 
                buf_buddy_free(buf_pool, bpage, sizeof *bpage);
 
2247
                buf_buddy_free(bpage, sizeof *bpage);
2969
2248
 
2970
 
                buf_pool_mutex_exit(buf_pool);
 
2249
                buf_pool_mutex_exit();
2971
2250
 
2972
2251
                /* Decompress the page and apply buffered operations
2973
 
                while not holding buf_pool->mutex or block->mutex. */
 
2252
                while not holding buf_pool_mutex or block->mutex. */
2974
2253
                success = buf_zip_decompress(block, srv_use_checksums);
2975
2254
                ut_a(success);
2976
2255
 
2980
2259
                }
2981
2260
 
2982
2261
                /* Unfix and unlatch the block. */
2983
 
                buf_pool_mutex_enter(buf_pool);
 
2262
                buf_pool_mutex_enter();
2984
2263
                mutex_enter(&block->mutex);
2985
2264
                block->page.buf_fix_count--;
2986
2265
                buf_block_set_io_fix(block, BUF_IO_NONE);
2987
2266
                mutex_exit(&block->mutex);
2988
2267
                buf_pool->n_pend_unzip--;
2989
2268
                rw_lock_x_unlock(&block->lock);
2990
 
 
2991
2269
                break;
2992
2270
 
2993
2271
        case BUF_BLOCK_ZIP_FREE:
3017
2295
 
3018
2296
        access_time = buf_page_is_accessed(&block->page);
3019
2297
 
3020
 
        buf_pool_mutex_exit(buf_pool);
 
2298
        buf_pool_mutex_exit();
3021
2299
 
3022
2300
        buf_page_set_accessed_make_young(&block->page, access_time);
3023
2301
 
3102
2380
        ulint           line,   /*!< in: line where called */
3103
2381
        mtr_t*          mtr)    /*!< in: mini-transaction */
3104
2382
{
3105
 
        buf_pool_t*     buf_pool;
3106
2383
        unsigned        access_time;
3107
2384
        ibool           success;
3108
2385
        ulint           fix_type;
3196
2473
        ut_a(ibuf_count_get(buf_block_get_space(block),
3197
2474
                            buf_block_get_page_no(block)) == 0);
3198
2475
#endif
3199
 
        buf_pool = buf_pool_from_block(block);
3200
2476
        buf_pool->stat.n_page_gets++;
3201
2477
 
3202
2478
        return(TRUE);
3218
2494
        ulint           line,   /*!< in: line where called */
3219
2495
        mtr_t*          mtr)    /*!< in: mini-transaction */
3220
2496
{
3221
 
        buf_pool_t*     buf_pool;
3222
2497
        ibool           success;
3223
2498
        ulint           fix_type;
3224
2499
 
3247
2522
 
3248
2523
        mutex_exit(&block->mutex);
3249
2524
 
3250
 
        buf_pool = buf_pool_from_block(block);
3251
 
 
3252
2525
        if (mode == BUF_MAKE_YOUNG && buf_page_peek_if_too_old(&block->page)) {
3253
 
                buf_pool_mutex_enter(buf_pool);
 
2526
                buf_pool_mutex_enter();
3254
2527
                buf_LRU_make_block_young(&block->page);
3255
 
                buf_pool_mutex_exit(buf_pool);
 
2528
                buf_pool_mutex_exit();
3256
2529
        } else if (!buf_page_is_accessed(&block->page)) {
3257
2530
                /* Above, we do a dirty read on purpose, to avoid
3258
2531
                mutex contention.  The field buf_page_t::access_time
3259
2532
                is only used for heuristic purposes.  Writes to the
3260
2533
                field must be protected by mutex, however. */
3261
2534
                ulint   time_ms = ut_time_ms();
3262
2535
 
3263
 
                buf_pool_mutex_enter(buf_pool);
 
2536
                buf_pool_mutex_enter();
3264
2537
                buf_page_set_accessed(&block->page, time_ms);
3265
 
                buf_pool_mutex_exit(buf_pool);
 
2538
                buf_pool_mutex_exit();
3266
2539
        }
3267
2540
 
3268
2541
        ut_ad(!ibuf_inside() || (mode == BUF_KEEP_OLD));
3324
2597
        buf_block_t*    block;
3325
2598
        ibool           success;
3326
2599
        ulint           fix_type;
3327
 
        buf_pool_t*     buf_pool = buf_pool_get(space_id, page_no);
3328
2600
 
3329
2601
        ut_ad(mtr);
3330
2602
        ut_ad(mtr->state == MTR_ACTIVE);
3331
2603
 
3332
 
        buf_pool_mutex_enter(buf_pool);
3333
 
        block = buf_block_hash_get(buf_pool, space_id, page_no);
 
2604
        buf_pool_mutex_enter();
 
2605
        block = buf_block_hash_get(space_id, page_no);
3334
2606
 
3335
 
        if (!block || buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE) {
3336
 
                buf_pool_mutex_exit(buf_pool);
 
2607
        if (!block) {
 
2608
                buf_pool_mutex_exit();
3337
2609
                return(NULL);
3338
2610
        }
3339
2611
 
3340
 
        ut_ad(!buf_pool_watch_is_sentinel(buf_pool, &block->page));
3341
 
 
3342
2612
        mutex_enter(&block->mutex);
3343
 
        buf_pool_mutex_exit(buf_pool);
 
2613
        buf_pool_mutex_exit();
3344
2614
 
3345
2615
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
3346
2616
        ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
3423
2693
        ulint           space,  /*!< in: space id */
3424
2694
        ulint           offset, /*!< in: offset of the page within space
3425
2695
                                in units of a page */
3426
 
        ulint           fold,   /*!< in: buf_page_address_fold(space,offset) */
3427
2696
        buf_block_t*    block)  /*!< in: block to init */
3428
2697
{
3429
2698
        buf_page_t*     hash_page;
3430
 
        buf_pool_t*     buf_pool = buf_pool_get(space, offset);
3431
2699
 
3432
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
2700
        ut_ad(buf_pool_mutex_own());
3433
2701
        ut_ad(mutex_own(&(block->mutex)));
3434
2702
        ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE);
3435
2703
 
3447
2715
 
3448
2716
        buf_block_init_low(block);
3449
2717
 
3450
 
        block->lock_hash_val = lock_rec_hash(space, offset);
3451
 
 
3452
 
        buf_page_init_low(&block->page);
 
2718
        block->lock_hash_val    = lock_rec_hash(space, offset);
3453
2719
 
3454
2720
        /* Insert into the hash table of file pages */
3455
2721
 
3456
 
        hash_page = buf_page_hash_get_low(buf_pool, space, offset, fold);
3457
 
 
3458
 
        if (UNIV_LIKELY(!hash_page)) {
3459
 
        } else if (buf_pool_watch_is_sentinel(buf_pool, hash_page)) {
3460
 
                /* Preserve the reference count. */
3461
 
                ulint   buf_fix_count = hash_page->buf_fix_count;
3462
 
 
3463
 
                ut_a(buf_fix_count > 0);
3464
 
                block->page.buf_fix_count += buf_fix_count;
3465
 
                buf_pool_watch_remove(buf_pool, fold, hash_page);
3466
 
        } else {
 
2722
        hash_page = buf_page_hash_get(space, offset);
 
2723
 
 
2724
        if (UNIV_LIKELY_NULL(hash_page)) {
3467
2725
                fprintf(stderr,
3468
2726
                        "InnoDB: Error: page %lu %lu already found"
3469
2727
                        " in the hash table: %p, %p\n",
3472
2730
                        (const void*) hash_page, (const void*) block);
3473
2731
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
3474
2732
                mutex_exit(&block->mutex);
3475
 
                buf_pool_mutex_exit(buf_pool);
 
2733
                buf_pool_mutex_exit();
3476
2734
                buf_print();
3477
2735
                buf_LRU_print();
3478
2736
                buf_validate();
3481
2739
                ut_error;
3482
2740
        }
3483
2741
 
 
2742
        buf_page_init_low(&block->page);
 
2743
 
3484
2744
        ut_ad(!block->page.in_zip_hash);
3485
2745
        ut_ad(!block->page.in_page_hash);
3486
2746
        ut_d(block->page.in_page_hash = TRUE);
3487
2747
        HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
3488
 
                    fold, &block->page);
 
2748
                    buf_page_address_fold(space, offset), &block->page);
3489
2749
}
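
A structural change in buf_page_init() and its callers is that buf_page_address_fold(space, offset) is now computed once and passed down, so the hash lookup, the watch removal and the HASH_INSERT all use the same fold. A standalone sketch of folding an address pair into a hash cell (the fold formula mirrors the (space << 20) + space + offset style of InnoDB's fold, but treat the constants as illustrative):

#include <stdio.h>

#define N_CELLS 1009UL          /* number of hash cells, illustrative */

/* Fold a (space id, page number) pair into one hash value. */
static unsigned long
page_address_fold(unsigned long space, unsigned long offset)
{
        return((space << 20) + space + offset);
}

int main(void)
{
        unsigned long fold = page_address_fold(5, 42);

        /* Compute the fold once, then reuse it for every page_hash
        operation on the same page, as buf_page_init() now does. */
        printf("fold %lu -> cell %lu\n", fold, fold % N_CELLS);
        return(0);
}
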
3490
2750
 
3491
2751
/********************************************************************//**
3507
2767
        ulint           space,  /*!< in: space id */
3508
2768
        ulint           zip_size,/*!< in: compressed page size, or 0 */
3509
2769
        ibool           unzip,  /*!< in: TRUE=request uncompressed page */
3510
 
        ib_int64_t      tablespace_version,
3511
 
                                /*!< in: prevents reading from a wrong
 
2770
        ib_int64_t      tablespace_version,/*!< in: prevents reading from a wrong
3512
2771
                                version of the tablespace in case we have done
3513
2772
                                DISCARD + IMPORT */
3514
2773
        ulint           offset) /*!< in: page number */
3515
2774
{
3516
2775
        buf_block_t*    block;
3517
 
        buf_page_t*     bpage   = NULL;
3518
 
        buf_page_t*     watch_page;
 
2776
        buf_page_t*     bpage;
3519
2777
        mtr_t           mtr;
3520
 
        ulint           fold;
3521
2778
        ibool           lru     = FALSE;
3522
2779
        void*           data;
3523
 
        buf_pool_t*     buf_pool = buf_pool_get(space, offset);
3524
2780
 
3525
2781
        ut_ad(buf_pool);
3526
2782
 
3549
2805
            && UNIV_LIKELY(!recv_recovery_is_on())) {
3550
2806
                block = NULL;
3551
2807
        } else {
3552
 
                block = buf_LRU_get_free_block(buf_pool, 0);
 
2808
                block = buf_LRU_get_free_block(0);
3553
2809
                ut_ad(block);
3554
 
                ut_ad(buf_pool_from_block(block) == buf_pool);
3555
2810
        }
3556
2811
 
3557
 
        fold = buf_page_address_fold(space, offset);
3558
 
 
3559
 
        buf_pool_mutex_enter(buf_pool);
3560
 
 
3561
 
        watch_page = buf_page_hash_get_low(buf_pool, space, offset, fold);
3562
 
        if (watch_page && !buf_pool_watch_is_sentinel(buf_pool, watch_page)) {
 
2812
        buf_pool_mutex_enter();
 
2813
 
 
2814
        if (buf_page_hash_get(space, offset)) {
3563
2815
                /* The page is already in the buffer pool. */
3564
 
                watch_page = NULL;
3565
2816
err_exit:
3566
2817
                if (block) {
3567
2818
                        mutex_enter(&block->mutex);
3585
2836
        if (block) {
3586
2837
                bpage = &block->page;
3587
2838
                mutex_enter(&block->mutex);
3588
 
 
3589
 
                ut_ad(buf_pool_from_bpage(bpage) == buf_pool);
3590
 
 
3591
 
                buf_page_init(space, offset, fold, block);
 
2839
                buf_page_init(space, offset, block);
3592
2840
 
3593
2841
                /* The block must be put to the LRU list, to the old blocks */
3594
2842
                buf_LRU_add_block(bpage, TRUE/* to old blocks */);
3608
2856
                if (UNIV_UNLIKELY(zip_size)) {
3609
2857
                        page_zip_set_size(&block->page.zip, zip_size);
3610
2858
 
3611
 
                        /* buf_pool->mutex may be released and
 
2859
                        /* buf_pool_mutex may be released and
3612
2860
                        reacquired by buf_buddy_alloc().  Thus, we
3613
2861
                        must release block->mutex in order not to
3614
2862
                        break the latching order in the reacquisition
3615
 
                        of buf_pool->mutex.  We also must defer this
 
2863
                        of buf_pool_mutex.  We also must defer this
3616
2864
                        operation until after the block descriptor has
3617
2865
                        been added to buf_pool->LRU and
3618
2866
                        buf_pool->page_hash. */
3619
2867
                        mutex_exit(&block->mutex);
3620
 
                        data = buf_buddy_alloc(buf_pool, zip_size, &lru);
 
2868
                        data = buf_buddy_alloc(zip_size, &lru);
3621
2869
                        mutex_enter(&block->mutex);
3622
2870
                        block->page.zip.data = data;
3623
2871
 
3641
2889
                control block (bpage), in order to avoid the
3642
2890
                invocation of buf_buddy_relocate_block() on
3643
2891
                uninitialized data. */
3644
 
                data = buf_buddy_alloc(buf_pool, zip_size, &lru);
3645
 
                bpage = buf_buddy_alloc(buf_pool, sizeof *bpage, &lru);
3646
 
 
3647
 
                /* Initialize the buf_pool pointer. */
3648
 
                bpage->buf_pool = buf_pool;
 
2892
                data = buf_buddy_alloc(zip_size, &lru);
 
2893
                bpage = buf_buddy_alloc(sizeof *bpage, &lru);
3649
2894
 
3650
2895
                /* If buf_buddy_alloc() allocated storage from the LRU list,
3651
 
                it released and reacquired buf_pool->mutex.  Thus, we must
 
2896
                it released and reacquired buf_pool_mutex.  Thus, we must
3652
2897
                check the page_hash again, as it may have been modified. */
3653
 
                if (UNIV_UNLIKELY(lru)) {
3654
 
 
3655
 
                        watch_page = buf_page_hash_get_low(
3656
 
                                buf_pool, space, offset, fold);
3657
 
 
3658
 
                        if (watch_page
3659
 
                            && !buf_pool_watch_is_sentinel(buf_pool,
3660
 
                                                           watch_page)) {
3661
 
 
3662
 
                                /* The block was added by some other thread. */
3663
 
                                watch_page = NULL;
3664
 
                                buf_buddy_free(buf_pool, bpage, sizeof *bpage);
3665
 
                                buf_buddy_free(buf_pool, data, zip_size);
3666
 
 
3667
 
                                bpage = NULL;
3668
 
                                goto func_exit;
3669
 
                        }
 
2898
                if (UNIV_UNLIKELY(lru)
 
2899
                    && UNIV_LIKELY_NULL(buf_page_hash_get(space, offset))) {
 
2900
 
 
2901
                        /* The block was added by some other thread. */
 
2902
                        buf_buddy_free(bpage, sizeof *bpage);
 
2903
                        buf_buddy_free(data, zip_size);
 
2904
 
 
2905
                        bpage = NULL;
 
2906
                        goto func_exit;
3670
2907
                }
3671
2908
 
3672
2909
                page_zip_des_init(&bpage->zip);
3673
2910
                page_zip_set_size(&bpage->zip, zip_size);
3674
2911
                bpage->zip.data = data;
3675
2912
 
3676
 
                mutex_enter(&buf_pool->zip_mutex);
 
2913
                mutex_enter(&buf_pool_zip_mutex);
3677
2914
                UNIV_MEM_DESC(bpage->zip.data,
3678
2915
                              page_zip_get_size(&bpage->zip), bpage);
3679
 
 
3680
2916
                buf_page_init_low(bpage);
3681
 
 
3682
2917
                bpage->state    = BUF_BLOCK_ZIP_PAGE;
3683
2918
                bpage->space    = space;
3684
2919
                bpage->offset   = offset;
3685
2920
 
3686
 
 
3687
2921
#ifdef UNIV_DEBUG
3688
2922
                bpage->in_page_hash = FALSE;
3689
2923
                bpage->in_zip_hash = FALSE;
3693
2927
#endif /* UNIV_DEBUG */
3694
2928
 
3695
2929
                ut_d(bpage->in_page_hash = TRUE);
3696
 
 
3697
 
                if (UNIV_LIKELY_NULL(watch_page)) {
3698
 
                        /* Preserve the reference count. */
3699
 
                        ulint   buf_fix_count = watch_page->buf_fix_count;
3700
 
                        ut_a(buf_fix_count > 0);
3701
 
                        bpage->buf_fix_count += buf_fix_count;
3702
 
                        ut_ad(buf_pool_watch_is_sentinel(buf_pool, watch_page));
3703
 
                        buf_pool_watch_remove(buf_pool, fold, watch_page);
3704
 
                }
3705
 
 
3706
 
                HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, fold,
3707
 
                            bpage);
 
2930
                HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
 
2931
                            buf_page_address_fold(space, offset), bpage);
3708
2932
 
3709
2933
                /* The block must be put to the LRU list, to the old blocks */
3710
2934
                buf_LRU_add_block(bpage, TRUE/* to old blocks */);
3712
2936
 
3713
2937
                buf_page_set_io_fix(bpage, BUF_IO_READ);
3714
2938
 
3715
 
                mutex_exit(&buf_pool->zip_mutex);
 
2939
                mutex_exit(&buf_pool_zip_mutex);
3716
2940
        }
3717
2941
 
3718
2942
        buf_pool->n_pend_reads++;
3719
2943
func_exit:
3720
 
        buf_pool_mutex_exit(buf_pool);
 
2944
        buf_pool_mutex_exit();
3721
2945
 
3722
2946
        if (mode == BUF_READ_IBUF_PAGES_ONLY) {
3723
2947
 
3746
2970
{
3747
2971
        buf_frame_t*    frame;
3748
2972
        buf_block_t*    block;
3749
 
        ulint           fold;
3750
2973
        buf_block_t*    free_block      = NULL;
3751
2974
        ulint           time_ms         = ut_time_ms();
3752
 
        buf_pool_t*     buf_pool        = buf_pool_get(space, offset);
3753
2975
 
3754
2976
        ut_ad(mtr);
3755
2977
        ut_ad(mtr->state == MTR_ACTIVE);
3756
2978
        ut_ad(space || !zip_size);
3757
2979
 
3758
 
        free_block = buf_LRU_get_free_block(buf_pool, 0);
3759
 
 
3760
 
        fold = buf_page_address_fold(space, offset);
3761
 
 
3762
 
        buf_pool_mutex_enter(buf_pool);
3763
 
 
3764
 
        block = (buf_block_t*) buf_page_hash_get_low(
3765
 
                buf_pool, space, offset, fold);
3766
 
 
3767
 
        if (block
3768
 
            && buf_page_in_file(&block->page)
3769
 
            && !buf_pool_watch_is_sentinel(buf_pool, &block->page)) {
 
2980
        free_block = buf_LRU_get_free_block(0);
 
2981
 
 
2982
        buf_pool_mutex_enter();
 
2983
 
 
2984
        block = (buf_block_t*) buf_page_hash_get(space, offset);
 
2985
 
 
2986
        if (block && buf_page_in_file(&block->page)) {
3770
2987
#ifdef UNIV_IBUF_COUNT_DEBUG
3771
2988
                ut_a(ibuf_count_get(space, offset) == 0);
3772
2989
#endif
3775
2992
#endif /* UNIV_DEBUG_FILE_ACCESSES */
3776
2993
 
3777
2994
                /* Page can be found in buf_pool */
3778
 
                buf_pool_mutex_exit(buf_pool);
 
2995
                buf_pool_mutex_exit();
3779
2996
 
3780
2997
                buf_block_free(free_block);
3781
2998
 
3796
3013
 
3797
3014
        mutex_enter(&block->mutex);
3798
3015
 
3799
 
        buf_page_init(space, offset, fold, block);
 
3016
        buf_page_init(space, offset, block);
3800
3017
 
3801
3018
        /* The block must be put to the LRU list */
3802
3019
        buf_LRU_add_block(&block->page, FALSE);
3809
3026
                ibool   lru;
3810
3027
 
3811
3028
                /* Prevent race conditions during buf_buddy_alloc(),
3812
 
                which may release and reacquire buf_pool->mutex,
 
3029
                which may release and reacquire buf_pool_mutex,
3813
3030
                by IO-fixing and X-latching the block. */
3814
3031
 
3815
3032
                buf_page_set_io_fix(&block->page, BUF_IO_READ);
3817
3034
 
3818
3035
                page_zip_set_size(&block->page.zip, zip_size);
3819
3036
                mutex_exit(&block->mutex);
3820
 
                /* buf_pool->mutex may be released and reacquired by
 
3037
                /* buf_pool_mutex may be released and reacquired by
3821
3038
                buf_buddy_alloc().  Thus, we must release block->mutex
3822
3039
                in order not to break the latching order in
3823
 
                the reacquisition of buf_pool->mutex.  We also must
 
3040
                the reacquisition of buf_pool_mutex.  We also must
3824
3041
                defer this operation until after the block descriptor
3825
3042
                has been added to buf_pool->LRU and buf_pool->page_hash. */
3826
 
                data = buf_buddy_alloc(buf_pool, zip_size, &lru);
 
3043
                data = buf_buddy_alloc(zip_size, &lru);
3827
3044
                mutex_enter(&block->mutex);
3828
3045
                block->page.zip.data = data;
3829
3046
 
3841
3058
 
3842
3059
        buf_page_set_accessed(&block->page, time_ms);
3843
3060
 
3844
 
        buf_pool_mutex_exit(buf_pool);
 
3061
        buf_pool_mutex_exit();
3845
3062
 
3846
3063
        mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
3847
3064
 
3853
3070
        ibuf_merge_or_delete_for_page(NULL, space, offset, zip_size, TRUE);
3854
3071
 
3855
3072
        /* Flush pages from the end of the LRU list if necessary */
3856
 
        buf_flush_free_margin(buf_pool);
 
3073
        buf_flush_free_margin();
3857
3074
 
3858
3075
        frame = block->frame;
3859
3076
 
3889
3106
        buf_page_t*     bpage)  /*!< in: pointer to the block in question */
3890
3107
{
3891
3108
        enum buf_io_fix io_type;
3892
 
        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
3893
3109
        const ibool     uncompressed = (buf_page_get_state(bpage)
3894
3110
                                        == BUF_BLOCK_FILE_PAGE);
3895
3111
 
4025
3241
                }
4026
3242
        }
4027
3243
 
4028
 
        buf_pool_mutex_enter(buf_pool);
 
3244
        buf_pool_mutex_enter();
4029
3245
        mutex_enter(buf_page_get_mutex(bpage));
4030
3246
 
4031
3247
#ifdef UNIV_IBUF_COUNT_DEBUG
4089
3305
#endif /* UNIV_DEBUG */
4090
3306
 
4091
3307
        mutex_exit(buf_page_get_mutex(bpage));
4092
 
        buf_pool_mutex_exit(buf_pool);
4093
 
}
4094
 
 
4095
 
/*********************************************************************//**
4096
 
Asserts that all file pages in the buffer are in a replaceable state.
4097
 
@return TRUE */
4098
 
static
4099
 
ibool
4100
 
buf_all_freed_instance(
4101
 
/*===================*/
4102
 
        buf_pool_t*     buf_pool)       /*!< in: buffer pool instance */
4103
 
{
4104
 
        ulint           i;
4105
 
        buf_chunk_t*    chunk;
4106
 
 
4107
 
        ut_ad(buf_pool);
4108
 
 
4109
 
        buf_pool_mutex_enter(buf_pool);
4110
 
 
4111
 
        chunk = buf_pool->chunks;
4112
 
 
4113
 
        for (i = buf_pool->n_chunks; i--; chunk++) {
4114
 
 
4115
 
                const buf_block_t* block = buf_chunk_not_freed(chunk);
4116
 
 
4117
 
                if (UNIV_LIKELY_NULL(block)) {
4118
 
                        fprintf(stderr,
4119
 
                                "Page %lu %lu still fixed or dirty\n",
4120
 
                                (ulong) block->page.space,
4121
 
                                (ulong) block->page.offset);
4122
 
                        ut_error;
4123
 
                }
4124
 
        }
4125
 
 
4126
 
        buf_pool_mutex_exit(buf_pool);
4127
 
 
4128
 
        return(TRUE);
4129
 
}
4130
 
 
4131
 
/*********************************************************************//**
4132
 
Invalidates file pages in one buffer pool instance */
4133
 
static
 
3308
        buf_pool_mutex_exit();
 
3309
}
 
3310
 
 
3311
/*********************************************************************//**
 
3312
Invalidates the file pages in the buffer pool when an archive recovery is
 
3313
completed. All the file pages buffered must be in a replaceable state when
 
3314
this function is called: not latched and not modified. */
 
3315
UNIV_INTERN
4134
3316
void
4135
 
buf_pool_invalidate_instance(
4136
 
/*=========================*/
4137
 
        buf_pool_t*     buf_pool)       /*!< in: buffer pool instance */
 
3317
buf_pool_invalidate(void)
 
3318
/*=====================*/
4138
3319
{
4139
3320
        ibool           freed;
4140
3321
        enum buf_flush  i;
4141
3322
 
4142
 
        buf_pool_mutex_enter(buf_pool);
 
3323
        buf_pool_mutex_enter();
4143
3324
 
4144
3325
        for (i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; i++) {
4145
3326
 
4155
3336
                pool invalidation to proceed we must ensure there is NO
4156
3337
                write activity happening. */
4157
3338
                if (buf_pool->n_flush[i] > 0) {
4158
 
                        buf_pool_mutex_exit(buf_pool);
4159
 
                        buf_flush_wait_batch_end(buf_pool, i);
4160
 
                        buf_pool_mutex_enter(buf_pool);
 
3339
                        buf_pool_mutex_exit();
 
3340
                        buf_flush_wait_batch_end(i);
 
3341
                        buf_pool_mutex_enter();
4161
3342
                }
4162
3343
        }
4163
3344
 
4164
 
        buf_pool_mutex_exit(buf_pool);
 
3345
        buf_pool_mutex_exit();
4165
3346
 
4166
 
        ut_ad(buf_all_freed_instance(buf_pool));
 
3347
        ut_ad(buf_all_freed());
4167
3348
 
4168
3349
        freed = TRUE;
4169
3350
 
4170
3351
        while (freed) {
4171
 
                freed = buf_LRU_search_and_free_block(buf_pool, 100);
 
3352
                freed = buf_LRU_search_and_free_block(100);
4172
3353
        }
4173
3354
 
4174
 
        buf_pool_mutex_enter(buf_pool);
 
3355
        buf_pool_mutex_enter();
4175
3356
 
4176
3357
        ut_ad(UT_LIST_GET_LEN(buf_pool->LRU) == 0);
4177
3358
        ut_ad(UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0);
4182
3363
        buf_pool->LRU_flush_ended = 0;
4183
3364
 
4184
3365
        memset(&buf_pool->stat, 0x00, sizeof(buf_pool->stat));
4185
 
        buf_refresh_io_stats(buf_pool);
4186
 
 
4187
 
        buf_pool_mutex_exit(buf_pool);
4188
 
}
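
buf_pool_invalidate_instance() must not hold the pool mutex while it waits for running flush batches, so it exits the mutex, waits, and re-enters before rechecking n_flush[]. The same drop-wait-retake shape, expressed with a pthread condition variable in a self-contained program (names are illustrative; the real code blocks on per-pool events rather than a condvar):

#include <pthread.h>
#include <stdio.h>

/* Models the wait in buf_pool_invalidate_instance(): the pool mutex
is dropped while waiting for in-flight flush batches to drain. */
static pthread_mutex_t pool_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  no_flush   = PTHREAD_COND_INITIALIZER;
static int             n_flush    = 1; /* pretend one batch runs */

static void* flusher(void* arg)
{
        (void) arg;
        pthread_mutex_lock(&pool_mutex);
        n_flush = 0;                    /* the batch completes */
        pthread_cond_broadcast(&no_flush);
        pthread_mutex_unlock(&pool_mutex);
        return(NULL);
}

int main(void)
{
        pthread_t t;

        pthread_mutex_lock(&pool_mutex);
        pthread_create(&t, NULL, flusher, NULL);
        while (n_flush > 0) {
                /* pthread_cond_wait() drops and retakes pool_mutex:
                the same exit/wait/enter dance as in the code above. */
                pthread_cond_wait(&no_flush, &pool_mutex);
        }
        pthread_mutex_unlock(&pool_mutex);
        pthread_join(t, NULL);
        puts("all flush batches drained");
        return(0);
}
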
4189
 
 
4190
 
/*********************************************************************//**
4191
 
Invalidates the file pages in the buffer pool when an archive recovery is
4192
 
completed. All the file pages buffered must be in a replaceable state when
4193
 
this function is called: not latched and not modified. */
4194
 
UNIV_INTERN
4195
 
void
4196
 
buf_pool_invalidate(void)
4197
 
/*=====================*/
4198
 
{
4199
 
        ulint   i;
4200
 
 
4201
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
4202
 
                buf_pool_invalidate_instance(buf_pool_from_array(i));
4203
 
        }
 
3366
        buf_refresh_io_stats();
 
3367
 
 
3368
        buf_pool_mutex_exit();
4204
3369
}
4205
3370
 
4206
3371
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
4207
3372
/*********************************************************************//**
4208
 
Validates data in one buffer pool instance
 
3373
Validates the buffer buf_pool data structure.
4209
3374
@return TRUE */
4210
 
static
 
3375
UNIV_INTERN
4211
3376
ibool
4212
 
buf_pool_validate_instance(
4213
 
/*=======================*/
4214
 
        buf_pool_t*     buf_pool)       /*!< in: buffer pool instance */
 
3377
buf_validate(void)
 
3378
/*==============*/
4215
3379
{
4216
3380
        buf_page_t*     b;
4217
3381
        buf_chunk_t*    chunk;
4226
3390
 
4227
3391
        ut_ad(buf_pool);
4228
3392
 
4229
 
        buf_pool_mutex_enter(buf_pool);
 
3393
        buf_pool_mutex_enter();
4230
3394
 
4231
3395
        chunk = buf_pool->chunks;
4232
3396
 
4251
3415
                                break;
4252
3416
 
4253
3417
                        case BUF_BLOCK_FILE_PAGE:
4254
 
                                ut_a(buf_page_hash_get(buf_pool,
4255
 
                                                       buf_block_get_space(
 
3418
                                ut_a(buf_page_hash_get(buf_block_get_space(
4256
3419
                                                               block),
4257
3420
                                                       buf_block_get_page_no(
4258
3421
                                                               block))
4299
3462
                                }
4300
3463
 
4301
3464
                                n_lru++;
 
3465
 
 
3466
                                if (block->page.oldest_modification > 0) {
 
3467
                                        n_flush++;
 
3468
                                }
 
3469
 
4302
3470
                                break;
4303
3471
 
4304
3472
                        case BUF_BLOCK_NOT_USED:
4316
3484
                }
4317
3485
        }
4318
3486
 
4319
 
        mutex_enter(&buf_pool->zip_mutex);
 
3487
        mutex_enter(&buf_pool_zip_mutex);
4320
3488
 
4321
3489
        /* Check clean compressed-only blocks. */
4322
3490
 
4337
3505
                        ut_error;
4338
3506
                        break;
4339
3507
                }
4340
 
 
4341
 
                /* It is OK to read oldest_modification here because
4342
 
                we have acquired buf_pool->zip_mutex above which acts
4343
 
                as the 'block->mutex' for these bpages. */
4344
3508
                ut_a(!b->oldest_modification);
4345
 
                ut_a(buf_page_hash_get(buf_pool, b->space, b->offset) == b);
 
3509
                ut_a(buf_page_hash_get(b->space, b->offset) == b);
4346
3510
 
4347
3511
                n_lru++;
4348
3512
                n_zip++;
4349
3513
        }
4350
3514
 
4351
 
        /* Check dirty blocks. */
 
3515
        /* Check dirty compressed-only blocks. */
4352
3516
 
4353
 
        buf_flush_list_mutex_enter(buf_pool);
4354
3517
        for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
4355
3518
             b = UT_LIST_GET_NEXT(list, b)) {
4356
3519
                ut_ad(b->in_flush_list);
4357
 
                ut_a(b->oldest_modification);
4358
 
                n_flush++;
4359
3520
 
4360
3521
                switch (buf_page_get_state(b)) {
4361
3522
                case BUF_BLOCK_ZIP_DIRTY:
 
3523
                        ut_a(b->oldest_modification);
4362
3524
                        n_lru++;
 
3525
                        n_flush++;
4363
3526
                        n_zip++;
4364
3527
                        switch (buf_page_get_io_fix(b)) {
4365
3528
                        case BUF_IO_NONE:
4366
3529
                        case BUF_IO_READ:
4367
3530
                                break;
 
3531
 
4368
3532
                        case BUF_IO_WRITE:
4369
3533
                                switch (buf_page_get_flush_type(b)) {
4370
3534
                                case BUF_FLUSH_LRU:
4394
3558
                        ut_error;
4395
3559
                        break;
4396
3560
                }
4397
 
                ut_a(buf_page_hash_get(buf_pool, b->space, b->offset) == b);
 
3561
                ut_a(buf_page_hash_get(b->space, b->offset) == b);
4398
3562
        }
4399
3563
 
4400
 
        ut_a(UT_LIST_GET_LEN(buf_pool->flush_list) == n_flush);
4401
 
 
4402
 
        buf_flush_list_mutex_exit(buf_pool);
4403
 
 
4404
 
        mutex_exit(&buf_pool->zip_mutex);
 
3564
        mutex_exit(&buf_pool_zip_mutex);
4405
3565
 
4406
3566
        if (n_lru + n_free > buf_pool->curr_size + n_zip) {
4407
3567
                fprintf(stderr, "n LRU %lu, n free %lu, pool %lu zip %lu\n",
4417
3577
                        (ulong) n_free);
4418
3578
                ut_error;
4419
3579
        }
 
3580
        ut_a(UT_LIST_GET_LEN(buf_pool->flush_list) == n_flush);
4420
3581
 
4421
3582
        ut_a(buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE] == n_single_flush);
4422
3583
        ut_a(buf_pool->n_flush[BUF_FLUSH_LIST] == n_list_flush);
4423
3584
        ut_a(buf_pool->n_flush[BUF_FLUSH_LRU] == n_lru_flush);
4424
3585
 
4425
 
        buf_pool_mutex_exit(buf_pool);
 
3586
        buf_pool_mutex_exit();
4426
3587
 
4427
3588
        ut_a(buf_LRU_validate());
4428
 
        ut_a(buf_flush_validate(buf_pool));
4429
 
 
4430
 
        return(TRUE);
4431
 
}
4432
 
 
4433
 
/*********************************************************************//**
4434
 
Validates the buffer buf_pool data structure.
4435
 
@return TRUE */
4436
 
UNIV_INTERN
4437
 
ibool
4438
 
buf_validate(void)
4439
 
/*==============*/
4440
 
{
4441
 
        ulint   i;
4442
 
 
4443
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
4444
 
                buf_pool_t*     buf_pool;
4445
 
 
4446
 
                buf_pool = buf_pool_from_array(i);
4447
 
 
4448
 
                buf_pool_validate_instance(buf_pool);
4449
 
        }
4450
 
        return(TRUE);
4451
 
}
4452
 
 
 
3589
        ut_a(buf_flush_validate());
 
3590
 
 
3591
        return(TRUE);
 
3592
}
4453
3593
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
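
The loop in buf_validate() above is the pattern this merge applies throughout the file: the single global buf_pool is replaced by srv_buf_pool_instances separate pools reached through buf_pool_from_array(). A minimal sketch of that accessor, consistent with the buf_pool_ptr[] array declared at the top of this file; the merged tree's own definition may carry different assertions:

        /* Sketch only: returns the buffer pool instance at a given
        index; assumes buf_pool_ptr[] and srv_buf_pool_instances as
        declared elsewhere in this file. */
        UNIV_INLINE
        buf_pool_t*
        buf_pool_from_array(
        /*================*/
                ulint   index)  /*!< in: buffer pool index */
        {
                ut_ad(index < MAX_BUFFER_POOLS);
                ut_ad(index < srv_buf_pool_instances);

                return(buf_pool_ptr[index]);
        }
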
4454
3594
 
4455
3595
#if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
4456
3596
/*********************************************************************//**
4457
 
Prints info of the buffer buf_pool data structure for one instance. */
4458
 
static
 
3597
Prints info of the buffer buf_pool data structure. */
 
3598
UNIV_INTERN
4459
3599
void
4460
 
buf_print_instance(
4461
 
/*===============*/
4462
 
        buf_pool_t*     buf_pool)
 
3600
buf_print(void)
 
3601
/*===========*/
4463
3602
{
4464
 
        index_id_t*     index_ids;
 
3603
        dulint*         index_ids;
4465
3604
        ulint*          counts;
4466
3605
        ulint           size;
4467
3606
        ulint           i;
4468
3607
        ulint           j;
4469
 
        index_id_t      id;
 
3608
        dulint          id;
4470
3609
        ulint           n_found;
4471
3610
        buf_chunk_t*    chunk;
4472
3611
        dict_index_t*   index;
4475
3614
 
4476
3615
        size = buf_pool->curr_size;
4477
3616
 
4478
 
        index_ids = mem_alloc(size * sizeof *index_ids);
 
3617
        index_ids = mem_alloc(sizeof(dulint) * size);
4479
3618
        counts = mem_alloc(sizeof(ulint) * size);
4480
3619
 
4481
 
        buf_pool_mutex_enter(buf_pool);
4482
 
        buf_flush_list_mutex_enter(buf_pool);
 
3620
        buf_pool_mutex_enter();
4483
3621
 
4484
3622
        fprintf(stderr,
4485
3623
                "buf_pool size %lu\n"
4506
3644
                (ulong) buf_pool->stat.n_pages_created,
4507
3645
                (ulong) buf_pool->stat.n_pages_written);
4508
3646
 
4509
 
        buf_flush_list_mutex_exit(buf_pool);
4510
 
 
4511
3647
        /* Count the number of blocks belonging to each index in the buffer */
4512
3648
 
4513
3649
        n_found = 0;
4530
3666
 
4531
3667
                                while (j < n_found) {
4532
3668
 
4533
 
                                        if (index_ids[j] == id) {
 
3669
                                        if (ut_dulint_cmp(index_ids[j],
 
3670
                                                          id) == 0) {
4534
3671
                                                counts[j]++;
4535
3672
 
4536
3673
                                                break;
4547
3684
                }
4548
3685
        }
4549
3686
 
4550
 
        buf_pool_mutex_exit(buf_pool);
 
3687
        buf_pool_mutex_exit();
4551
3688
 
4552
3689
        for (i = 0; i < n_found; i++) {
4553
3690
                index = dict_index_get_if_in_cache(index_ids[i]);
4554
3691
 
4555
3692
                fprintf(stderr,
4556
 
                        "Block count for index %llu in buffer is about %lu",
4557
 
                        (ullint) index_ids[i],
 
3693
                        "Block count for index %lu in buffer is about %lu",
 
3694
                        (ulong) ut_dulint_get_low(index_ids[i]),
4558
3695
                        (ulong) counts[i]);
4559
3696
 
4560
3697
                if (index) {
4568
3705
        mem_free(index_ids);
4569
3706
        mem_free(counts);
4570
3707
 
4571
 
        ut_a(buf_pool_validate_instance(buf_pool));
4572
 
}
4573
 
 
4574
 
/*********************************************************************//**
4575
 
Prints info of the buffer buf_pool data structure. */
4576
 
UNIV_INTERN
4577
 
void
4578
 
buf_print(void)
4579
 
/*===========*/
4580
 
{
4581
 
        ulint   i;
4582
 
 
4583
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
4584
 
                buf_pool_t*     buf_pool;
4585
 
 
4586
 
                buf_pool = buf_pool_from_array(i);
4587
 
                buf_print_instance(buf_pool);
4588
 
        }
 
3708
        ut_a(buf_validate());
4589
3709
}
4590
3710
#endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */
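
buf_print_instance() tallies how many resident pages belong to each index with a linear scan over two parallel arrays, index_ids[] and counts[]; this is adequate because n_found stays far smaller than the pool size. A self-contained sketch of that tallying idiom (the helper name tally_id is illustrative, not the file's own):

        /* Illustrative helper: record one page's index id in the
        parallel arrays, extending them on first sight of an id. */
        static void
        tally_id(
        /*=====*/
                index_id_t      id,     /*!< in: id seen on one page */
                index_id_t*     ids,    /*!< in/out: distinct ids found */
                ulint*          counts, /*!< in/out: per-id page counts */
                ulint*          n_found)/*!< in/out: entries used */
        {
                ulint   j;

                for (j = 0; j < *n_found; j++) {
                        if (ids[j] == id) {
                                counts[j]++;
                                return;
                        }
                }

                ids[*n_found] = id;
                counts[*n_found] = 1;
                (*n_found)++;
        }

Note that the new code compares index_id_t values with == where the old code needed ut_dulint_cmp(), since the merge replaces the dulint pair with a plain integer id.
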
4591
3711
 
4595
3715
@return number of latched pages */
4596
3716
UNIV_INTERN
4597
3717
ulint
4598
 
buf_get_latched_pages_number_instance(
4599
 
/*==================================*/
4600
 
        buf_pool_t*     buf_pool)       /*!< in: buffer pool instance */
 
3718
buf_get_latched_pages_number(void)
 
3719
/*==============================*/
4601
3720
{
 
3721
        buf_chunk_t*    chunk;
4602
3722
        buf_page_t*     b;
4603
3723
        ulint           i;
4604
 
        buf_chunk_t*    chunk;
4605
3724
        ulint           fixed_pages_number = 0;
4606
3725
 
4607
 
        buf_pool_mutex_enter(buf_pool);
 
3726
        buf_pool_mutex_enter();
4608
3727
 
4609
3728
        chunk = buf_pool->chunks;
4610
3729
 
4633
3752
                }
4634
3753
        }
4635
3754
 
4636
 
        mutex_enter(&buf_pool->zip_mutex);
 
3755
        mutex_enter(&buf_pool_zip_mutex);
4637
3756
 
4638
3757
        /* Traverse the lists of clean and dirty compressed-only blocks. */
4639
3758
 
4648
3767
                }
4649
3768
        }
4650
3769
 
4651
 
        buf_flush_list_mutex_enter(buf_pool);
4652
3770
        for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
4653
3771
             b = UT_LIST_GET_NEXT(list, b)) {
4654
3772
                ut_ad(b->in_flush_list);
4674
3792
                }
4675
3793
        }
4676
3794
 
4677
 
        buf_flush_list_mutex_exit(buf_pool);
4678
 
        mutex_exit(&buf_pool->zip_mutex);
4679
 
        buf_pool_mutex_exit(buf_pool);
 
3795
        mutex_exit(&buf_pool_zip_mutex);
 
3796
        buf_pool_mutex_exit();
4680
3797
 
4681
3798
        return(fixed_pages_number);
4682
3799
}
4683
 
 
4684
 
/*********************************************************************//**
4685
 
Returns the number of latched pages in all the buffer pools.
4686
 
@return number of latched pages */
4687
 
UNIV_INTERN
4688
 
ulint
4689
 
buf_get_latched_pages_number(void)
4690
 
/*==============================*/
4691
 
{
4692
 
        ulint   i;
4693
 
        ulint   total_latched_pages = 0;
4694
 
 
4695
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
4696
 
                buf_pool_t*     buf_pool;
4697
 
 
4698
 
                buf_pool = buf_pool_from_array(i);
4699
 
 
4700
 
                total_latched_pages += buf_get_latched_pages_number_instance(
4701
 
                        buf_pool);
4702
 
        }
4703
 
 
4704
 
        return(total_latched_pages);
4705
 
}
4706
 
 
4707
3800
#endif /* UNIV_DEBUG */
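
Here a page counts as latched when it is buffer-fixed or has i/o in flight. A sketch of the per-block predicate behind fixed_pages_number++ in the elided chunk loop, using the buf_fix_count field and io-fix accessor seen elsewhere in this file (the helper name is illustrative):

        /* Illustrative predicate: TRUE if the block would be counted
        by buf_get_latched_pages_number_instance(). */
        static ibool
        block_is_latched(
        /*=============*/
                buf_block_t*    block)  /*!< in: block to test */
        {
                return(block->page.buf_fix_count != 0
                       || buf_page_get_io_fix(&block->page)
                       != BUF_IO_NONE);
        }
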
4708
3801
 
4709
3802
/*********************************************************************//**
4714
3807
buf_get_n_pending_ios(void)
4715
3808
/*=======================*/
4716
3809
{
4717
 
        ulint   i;
4718
 
        ulint   pend_ios = 0;
4719
 
 
4720
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
4721
 
                buf_pool_t*     buf_pool;
4722
 
 
4723
 
                buf_pool = buf_pool_from_array(i);
4724
 
 
4725
 
                pend_ios +=
4726
 
                        buf_pool->n_pend_reads
4727
 
                        + buf_pool->n_flush[BUF_FLUSH_LRU]
4728
 
                        + buf_pool->n_flush[BUF_FLUSH_LIST]
4729
 
                        + buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE];
4730
 
        }
4731
 
 
4732
 
        return(pend_ios);
 
3810
        return(buf_pool->n_pend_reads
 
3811
               + buf_pool->n_flush[BUF_FLUSH_LRU]
 
3812
               + buf_pool->n_flush[BUF_FLUSH_LIST]
 
3813
               + buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]);
4733
3814
}
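
Note that this sum is taken without holding any buffer pool mutex, so it is a monitoring snapshot rather than an exact count; the locked variant is buf_pool_check_no_pending_io() further down. A one-line usage sketch (the message text is illustrative):

        /* Sketch: report the unlocked snapshot of in-flight i/o. */
        fprintf(stderr, "pending buffer pool i/os: %lu\n",
                (ulong) buf_get_n_pending_ios());
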
4734
3815
 
4735
3816
/*********************************************************************//**
4741
3822
buf_get_modified_ratio_pct(void)
4742
3823
/*============================*/
4743
3824
{
4744
 
        ulint           ratio;
4745
 
        ulint           lru_len = 0;
4746
 
        ulint           free_len = 0;
4747
 
        ulint           flush_list_len = 0;
4748
 
 
4749
 
        buf_get_total_list_len(&lru_len, &free_len, &flush_list_len);
4750
 
 
4751
 
        ratio = (100 * flush_list_len) / (1 + lru_len + free_len);
4752
 
  
 
3825
        ulint   ratio;
 
3826
 
 
3827
        buf_pool_mutex_enter();
 
3828
 
 
3829
        ratio = (100 * UT_LIST_GET_LEN(buf_pool->flush_list))
 
3830
                / (1 + UT_LIST_GET_LEN(buf_pool->LRU)
 
3831
                   + UT_LIST_GET_LEN(buf_pool->free));
 
3832
 
4753
3833
        /* 1 + is there to avoid division by zero */
4754
3834
 
 
3835
        buf_pool_mutex_exit();
 
3836
 
4755
3837
        return(ratio);
4756
3838
}
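
The percentage is plain integer arithmetic over the three list lengths, and the 1 + in the divisor exists only so an empty pool cannot divide by zero. A worked example with illustrative values:

        /* Worked example: 200 dirty pages, 700 pages on the LRU
        list, 99 free blocks. */
        ulint   flush_list_len = 200;
        ulint   lru_len = 700;
        ulint   free_len = 99;
        ulint   ratio;

        ratio = (100 * flush_list_len) / (1 + lru_len + free_len);
        /* ratio == 25, i.e. 20000 / 800: about a quarter of the
        pool is dirty */
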
4757
3839
 
4758
3840
/*********************************************************************//**
4759
3841
Prints info of the buffer i/o. */
4760
 
static
 
3842
UNIV_INTERN
4761
3843
void
4762
 
buf_print_io_instance(
4763
 
/*==================*/
4764
 
        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
4765
 
        FILE*           file)           /*!< in/out: buffer where to print */
 
3844
buf_print_io(
 
3845
/*=========*/
 
3846
        FILE*   file)   /*!< in/out: buffer where to print */
4766
3847
{
4767
3848
        time_t  current_time;
4768
3849
        double  time_elapsed;
4770
3851
 
4771
3852
        ut_ad(buf_pool);
4772
3853
 
4773
 
        buf_pool_mutex_enter(buf_pool);
4774
 
        buf_flush_list_mutex_enter(buf_pool);
 
3854
        buf_pool_mutex_enter();
4775
3855
 
4776
3856
        fprintf(file,
4777
3857
                "Buffer pool size   %lu\n"
4793
3873
                + buf_pool->init_flush[BUF_FLUSH_LIST],
4794
3874
                (ulong) buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]);
4795
3875
 
4796
 
        buf_flush_list_mutex_exit(buf_pool);
4797
 
 
4798
3876
        current_time = time(NULL);
4799
3877
        time_elapsed = 0.001 + difftime(current_time,
4800
3878
                                        buf_pool->last_printout_time);
4825
3903
                 - buf_pool->old_stat.n_pages_written)
4826
3904
                / time_elapsed);
4827
3905
 
4828
 
        n_gets_diff = buf_pool->stat.n_page_gets
4829
 
                    - buf_pool->old_stat.n_page_gets;
 
3906
        n_gets_diff = buf_pool->stat.n_page_gets - buf_pool->old_stat.n_page_gets;
4830
3907
 
4831
3908
        if (n_gets_diff) {
4832
3909
                fprintf(file,
4870
3947
                buf_LRU_stat_sum.io, buf_LRU_stat_cur.io,
4871
3948
                buf_LRU_stat_sum.unzip, buf_LRU_stat_cur.unzip);
4872
3949
 
4873
 
        buf_refresh_io_stats(buf_pool);
4874
 
        buf_pool_mutex_exit(buf_pool);
4875
 
}
4876
 
 
4877
 
/*********************************************************************//**
4878
 
Prints info of the buffer i/o. */
4879
 
UNIV_INTERN
4880
 
void
4881
 
buf_print_io(
4882
 
/*=========*/
4883
 
        FILE*   file)   /*!< in/out: buffer where to print */
4884
 
{
4885
 
        ulint   i;
4886
 
 
4887
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
4888
 
                buf_pool_t*     buf_pool;
4889
 
 
4890
 
                buf_pool = buf_pool_from_array(i);
4891
 
                buf_print_io_instance(buf_pool, file);
4892
 
        }
 
3950
        buf_refresh_io_stats();
 
3951
        buf_pool_mutex_exit();
4893
3952
}
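
The per-second averages printed above divide counter deltas by time_elapsed, which is floored at one millisecond (the 0.001 +) so that two back-to-back printouts cannot divide by zero. A sketch of the rate computation, using the stat/old_stat pairing that buf_refresh_io_stats() maintains:

        /* Sketch: pages written per second since the last printout.
        Assumes the same mutexes as buf_print_io_instance(). */
        double  time_elapsed = 0.001 + difftime(
                time(NULL), buf_pool->last_printout_time);
        double  writes_per_sec = (buf_pool->stat.n_pages_written
                                  - buf_pool->old_stat.n_pages_written)
                / time_elapsed;
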
4894
3953
 
4895
3954
/**********************************************************************//**
4896
3955
Refreshes the statistics used to print per-second averages. */
4897
3956
UNIV_INTERN
4898
3957
void
4899
 
buf_refresh_io_stats(
4900
 
/*=================*/
4901
 
        buf_pool_t*     buf_pool)       /*!< in: buffer pool instance */
 
3958
buf_refresh_io_stats(void)
 
3959
/*======================*/
4902
3960
{
4903
 
        buf_pool->last_printout_time = ut_time();
 
3961
        buf_pool->last_printout_time = time(NULL);
4904
3962
        buf_pool->old_stat = buf_pool->stat;
4905
3963
}
4906
3964
 
4907
 
/**********************************************************************//**
4908
 
Refreshes the statistics used to print per-second averages. */
4909
 
UNIV_INTERN
4910
 
void
4911
 
buf_refresh_io_stats_all(void)
4912
 
/*==========================*/
4913
 
{
4914
 
        ulint           i;
4915
 
 
4916
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
4917
 
                buf_pool_t*     buf_pool;
4918
 
 
4919
 
                buf_pool = buf_pool_from_array(i);
4920
 
 
4921
 
                buf_refresh_io_stats(buf_pool);
4922
 
        }
4923
 
}
4924
 
 
4925
 
/**********************************************************************//**
4926
 
Checks whether all pages in all buffer pools are in a replaceable state.
4927
 
@return FALSE if not */
 
3965
/*********************************************************************//**
 
3966
Asserts that all file pages in the buffer are in a replaceable state.
 
3967
@return TRUE */
4928
3968
UNIV_INTERN
4929
3969
ibool
4930
3970
buf_all_freed(void)
4931
3971
/*===============*/
4932
3972
{
4933
 
        ulint   i;
4934
 
 
4935
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
4936
 
                buf_pool_t*     buf_pool;
4937
 
 
4938
 
                buf_pool = buf_pool_from_array(i);
4939
 
 
4940
 
                if (!buf_all_freed_instance(buf_pool)) {
4941
 
                        return(FALSE);
 
3973
        buf_chunk_t*    chunk;
 
3974
        ulint           i;
 
3975
 
 
3976
        ut_ad(buf_pool);
 
3977
 
 
3978
        buf_pool_mutex_enter();
 
3979
 
 
3980
        chunk = buf_pool->chunks;
 
3981
 
 
3982
        for (i = buf_pool->n_chunks; i--; chunk++) {
 
3983
 
 
3984
                const buf_block_t* block = buf_chunk_not_freed(chunk);
 
3985
 
 
3986
                if (UNIV_LIKELY_NULL(block)) {
 
3987
                        fprintf(stderr,
 
3988
                                "Page %lu %lu still fixed or dirty\n",
 
3989
                                (ulong) block->page.space,
 
3990
                                (ulong) block->page.offset);
 
3991
                        ut_error;
4942
3992
                }
4943
 
        }
 
3993
        }
 
3994
 
 
3995
        buf_pool_mutex_exit();
4944
3996
 
4945
3997
        return(TRUE);
4946
3998
}
4947
 
  
 
3999
 
4948
4000
/*********************************************************************//**
4949
4001
Checks that there currently are no pending i/o-operations for the buffer
4950
4002
pool.
4954
4006
buf_pool_check_no_pending_io(void)
4955
4007
/*==============================*/
4956
4008
{
4957
 
        ulint           i;
4958
 
        ibool           ret = TRUE;
4959
 
 
4960
 
        buf_pool_mutex_enter_all();
4961
 
 
4962
 
        for (i = 0; i < srv_buf_pool_instances && ret; i++) {
4963
 
                const buf_pool_t*       buf_pool;
4964
 
 
4965
 
                buf_pool = buf_pool_from_array(i);
4966
 
 
4967
 
                if (buf_pool->n_pend_reads
4968
 
                    + buf_pool->n_flush[BUF_FLUSH_LRU]
4969
 
                    + buf_pool->n_flush[BUF_FLUSH_LIST]
4970
 
                    + buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]) {
4971
 
 
4972
 
                        ret = FALSE;
4973
 
                }
 
4009
        ibool   ret;
 
4010
 
 
4011
        buf_pool_mutex_enter();
 
4012
 
 
4013
        if (buf_pool->n_pend_reads + buf_pool->n_flush[BUF_FLUSH_LRU]
 
4014
            + buf_pool->n_flush[BUF_FLUSH_LIST]
 
4015
            + buf_pool->n_flush[BUF_FLUSH_SINGLE_PAGE]) {
 
4016
                ret = FALSE;
 
4017
        } else {
 
4018
                ret = TRUE;
4974
4019
        }
4975
4020
 
4976
 
        buf_pool_mutex_exit_all();
 
4021
        buf_pool_mutex_exit();
4977
4022
 
4978
4023
        return(ret);
4979
4024
}
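
Unlike the unlocked snapshot in buf_get_n_pending_ios(), this check takes every instance's mutex via buf_pool_mutex_enter_all() before reading the counters, so a TRUE result is exact at the moment the mutexes are held, which is what a shutdown path needs. An illustrative wait loop (not this file's code):

        /* Sketch: poll until no buffer pool instance has reads or
        flushes in flight; the sleep interval is arbitrary. */
        while (!buf_pool_check_no_pending_io()) {
                os_thread_sleep(100000);        /* 100 ms */
        }
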
4980
4025
 
4981
 
#if 0
4982
 
Code currently not used
4983
4026
/*********************************************************************//**
4984
4027
Gets the current length of the free list of buffer blocks.
4985
4028
@return length of the free list */
4990
4033
{
4991
4034
        ulint   len;
4992
4035
 
4993
 
        buf_pool_mutex_enter(buf_pool);
 
4036
        buf_pool_mutex_enter();
4994
4037
 
4995
4038
        len = UT_LIST_GET_LEN(buf_pool->free);
4996
4039
 
4997
 
        buf_pool_mutex_exit(buf_pool);
 
4040
        buf_pool_mutex_exit();
4998
4041
 
4999
4042
        return(len);
5000
4043
}
5001
 
#endif
5002
 
 
5003
4044
#else /* !UNIV_HOTBACKUP */
5004
4045
/********************************************************************//**
5005
4046
Inits a page to the buffer buf_pool, for use in ibbackup --restore. */