~drizzle-trunk/drizzle/development

Viewing changes to plugin/innobase/buf/buf0lru.c

Merge initial InnoDB+ import.

This was applied by generating a patch between the MySQL 5.1.50 InnoDB plugin
and the just-merged innodb+ from mysql-trunk
(revision-id: vasil.dimov@oracle.com-20100422110752-1zowoqxel5xx3z2e),
then resolving the remaining conflicts by hand. This should make it much
easier to merge the rest of InnoDB 1.1 and 1.2 from the mysql tree using
my bzr-reapply script.

This takes us to InnoDB 1.1.1(ish).
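
The diff below is dominated by one mechanical transformation: the single
global buffer pool becomes an array of buf_pool_t instances, so the global
buf_pool_mutex / buf_pool_zip_mutex turn into per-instance fields
(buf_pool->mutex, buf_pool->zip_mutex), the per-pool helpers gain an explicit
buf_pool_t* parameter, and the old global entry points become loops over
srv_buf_pool_instances. A minimal sketch of the pattern — buf_LRU_do_something
is a made-up name for illustration, while buf_pool_from_array(),
buf_pool_mutex_enter()/exit(), buf_pool_mutex_own() and
srv_buf_pool_instances are the real helpers used throughout this diff:

        /* Per-instance worker: takes the instance explicitly and asserts
        that the caller holds that instance's mutex. */
        static void
        buf_LRU_do_something_instance(
                buf_pool_t*     buf_pool)
        {
                ut_ad(buf_pool_mutex_own(buf_pool));
                /* ... operate on buf_pool->LRU, buf_pool->free, ... */
        }

        /* The old global entry point now just iterates the instances. */
        UNIV_INTERN void
        buf_LRU_do_something(void)
        {
                ulint   i;

                for (i = 0; i < srv_buf_pool_instances; i++) {
                        buf_pool_t*     buf_pool = buf_pool_from_array(i);

                        buf_pool_mutex_enter(buf_pool);
                        buf_LRU_do_something_instance(buf_pool);
                        buf_pool_mutex_exit(buf_pool);
                }
        }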

=== modified file 'plugin/innobase/buf/buf0lru.c'
@@ -51,7 +51,7 @@
 #include "srv0srv.h"
 
 /** The number of blocks from the LRU_old pointer onward, including
-the block pointed to, must be buf_LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV
+the block pointed to, must be buf_pool->LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV
 of the whole LRU list length, except that the tolerance defined below
 is allowed. Note that the tolerance must be small enough such that for
 even the BUF_LRU_OLD_MIN_LEN long LRU list, the LRU_old pointer is not

@@ -97,8 +97,9 @@
 #define BUF_LRU_IO_TO_UNZIP_FACTOR 50
 
 /** Sampled values buf_LRU_stat_cur.
-Protected by buf_pool_mutex.  Updated by buf_LRU_stat_update(). */
+Not protected by any mutex.  Updated by buf_LRU_stat_update(). */
 static buf_LRU_stat_t           buf_LRU_stat_arr[BUF_LRU_STAT_N_INTERVAL];
+
 /** Cursor to buf_LRU_stat_arr[] that is updated in a round-robin fashion. */
 static ulint                    buf_LRU_stat_arr_ind;
 

@@ -107,15 +108,12 @@
 UNIV_INTERN buf_LRU_stat_t      buf_LRU_stat_cur;
 
 /** Running sum of past values of buf_LRU_stat_cur.
-Updated by buf_LRU_stat_update().  Protected by buf_pool_mutex. */
+Updated by buf_LRU_stat_update().  Not Protected by any mutex. */
 UNIV_INTERN buf_LRU_stat_t      buf_LRU_stat_sum;
 
 /* @} */
 
 /** @name Heuristics for detecting index scan @{ */
-/** Reserve this much/BUF_LRU_OLD_RATIO_DIV of the buffer pool for
-"old" blocks.  Protected by buf_pool_mutex. */
-UNIV_INTERN uint        buf_LRU_old_ratio;
 /** Move blocks to "new" LRU list only if the first access was at
 least this many milliseconds ago.  Not protected by any mutex or latch. */
 UNIV_INTERN uint        buf_LRU_old_threshold_ms;

@@ -124,7 +122,7 @@
 /******************************************************************//**
 Takes a block out of the LRU list and page hash table.
 If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
-the object will be freed and buf_pool_zip_mutex will be released.
+the object will be freed and buf_pool->zip_mutex will be released.
 
 If a compressed page or a compressed-only block descriptor is freed,
 other compressed pages or compressed-only block descriptors may be

@@ -155,13 +153,14 @@
 @return TRUE if should use unzip_LRU */
 UNIV_INLINE
 ibool
-buf_LRU_evict_from_unzip_LRU(void)
-/*==============================*/
+buf_LRU_evict_from_unzip_LRU(
+/*=========================*/
+        buf_pool_t*     buf_pool)
 {
         ulint   io_avg;
         ulint   unzip_avg;
 
-        ut_ad(buf_pool_mutex_own());
+        ut_ad(buf_pool_mutex_own(buf_pool));
 
         /* If the unzip_LRU list is empty, we can only use the LRU. */
         if (UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0) {

@@ -229,7 +228,8 @@
 void
 buf_LRU_drop_page_hash_for_tablespace(
 /*==================================*/
-        ulint   id)     /*!< in: space id */
+        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
+        ulint           id)             /*!< in: space id */
 {
         buf_page_t*     bpage;
         ulint*          page_arr;

@@ -244,9 +244,10 @@
                 return;
         }
 
-        page_arr = ut_malloc(sizeof(ulint)
-                             * BUF_LRU_DROP_SEARCH_HASH_SIZE);
-        buf_pool_mutex_enter();
+        page_arr = ut_malloc(
+                sizeof(ulint) * BUF_LRU_DROP_SEARCH_HASH_SIZE);
+
+        buf_pool_mutex_enter(buf_pool);
 
 scan_again:
         num_entries = 0;

@@ -284,14 +285,17 @@
                         if (num_entries < BUF_LRU_DROP_SEARCH_HASH_SIZE) {
                                 goto next_page;
                         }
-                        /* Array full. We release the buf_pool_mutex to
+
+                        /* Array full. We release the buf_pool->mutex to
                         obey the latching order. */
-                        buf_pool_mutex_exit();
-
-                        buf_LRU_drop_page_hash_batch(id, zip_size, page_arr,
-                                                     num_entries);
+                        buf_pool_mutex_exit(buf_pool);
+
+                        buf_LRU_drop_page_hash_batch(
+                                id, zip_size, page_arr, num_entries);
+
                         num_entries = 0;
-                        buf_pool_mutex_enter();
+
+                        buf_pool_mutex_enter(buf_pool);
                 } else {
                         mutex_exit(block_mutex);
                 }

@@ -316,7 +320,7 @@
                 }
         }
 
-        buf_pool_mutex_exit();
+        buf_pool_mutex_exit(buf_pool);
 
         /* Drop any remaining batch of search hashed pages. */
         buf_LRU_drop_page_hash_batch(id, zip_size, page_arr, num_entries);

@@ -324,27 +328,21 @@
 }
 
 /******************************************************************//**
-Invalidates all pages belonging to a given tablespace when we are deleting
-the data file(s) of that tablespace. */
-UNIV_INTERN
+Invalidates all pages belonging to a given tablespace inside a specific
+buffer pool instance when we are deleting the data file(s) of that
+tablespace. */
+static
 void
-buf_LRU_invalidate_tablespace(
-/*==========================*/
-        ulint   id)     /*!< in: space id */
+buf_LRU_invalidate_tablespace_buf_pool_instance(
+/*============================================*/
+        buf_pool_t*     buf_pool,       /*!< buffer pool instance */
+        ulint           id)             /*!< in: space id */
 {
         buf_page_t*     bpage;
         ibool           all_freed;
 
-        /* Before we attempt to drop pages one by one we first
-        attempt to drop page hash index entries in batches to make
-        it more efficient. The batching attempt is a best effort
-        attempt and does not guarantee that all pages hash entries
-        will be dropped. We get rid of remaining page hash entries
-        one by one below. */
-        buf_LRU_drop_page_hash_for_tablespace(id);
-
 scan_again:
-        buf_pool_mutex_enter();
+        buf_pool_mutex_enter(buf_pool);
 
         all_freed = TRUE;
 

@@ -418,7 +416,7 @@
                                                 buf_pool_zip_mutex, it is
                                                 not necessary to acquire
                                                 further mutexes. */
-                                                ut_ad(&buf_pool_zip_mutex
+                                                ut_ad(&buf_pool->zip_mutex
                                                       == block_mutex);
                                                 ut_ad(mutex_own(block_mutex));
                                                 prev_bpage_buf_fix = TRUE;

@@ -432,7 +430,7 @@
                                 ulint   page_no;
                                 ulint   zip_size;
 
-                                buf_pool_mutex_exit();
+                                buf_pool_mutex_exit(buf_pool);
 
                                 zip_size = buf_page_get_zip_size(bpage);
                                 page_no = buf_page_get_page_no(bpage);

@@ -462,7 +460,7 @@
                                 /* The block_mutex should have been
                                 released by buf_LRU_block_remove_hashed_page()
                                 when it returns BUF_BLOCK_ZIP_FREE. */
-                                ut_ad(block_mutex == &buf_pool_zip_mutex);
+                                ut_ad(block_mutex == &buf_pool->zip_mutex);
                                 ut_ad(!mutex_own(block_mutex));
 
                                 if (prev_bpage_buf_fix) {

@@ -489,7 +487,7 @@
                 bpage = prev_bpage;
         }
 
-        buf_pool_mutex_exit();
+        buf_pool_mutex_exit(buf_pool);
 
         if (!all_freed) {
                 os_thread_sleep(20000);

@@ -498,6 +496,32 @@
         }
 }
 
+/******************************************************************//**
+Invalidates all pages belonging to a given tablespace when we are deleting
+the data file(s) of that tablespace. */
+UNIV_INTERN
+void
+buf_LRU_invalidate_tablespace(
+/*==========================*/
+        ulint   id)     /*!< in: space id */
+{
+        ulint   i;
+
+        /* Before we attempt to drop pages one by one we first
+        attempt to drop page hash index entries in batches to make
+        it more efficient. The batching attempt is a best effort
+        attempt and does not guarantee that all pages hash entries
+        will be dropped. We get rid of remaining page hash entries
+        one by one below. */
+        for (i = 0; i < srv_buf_pool_instances; i++) {
+                buf_pool_t*     buf_pool;
+
+                buf_pool = buf_pool_from_array(i);
+                buf_LRU_drop_page_hash_for_tablespace(buf_pool, id);
+                buf_LRU_invalidate_tablespace_buf_pool_instance(buf_pool, id);
+        }
+}
+
 /********************************************************************//**
 Insert a compressed block into buf_pool->zip_clean in the LRU order. */
 UNIV_INTERN

@@ -507,8 +531,9 @@
         buf_page_t*     bpage)  /*!< in: pointer to the block in question */
 {
         buf_page_t*     b;
+        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
 
-        ut_ad(buf_pool_mutex_own());
+        ut_ad(buf_pool_mutex_own(buf_pool));
         ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE);
 
         /* Find the first successor of bpage in the LRU list

@@ -538,16 +563,19 @@
 ibool
 buf_LRU_free_from_unzip_LRU_list(
 /*=============================*/
-        ulint   n_iterations)   /*!< in: how many times this has been called
-                                repeatedly without result: a high value means
-                                that we should search farther; we will search
-                                n_iterations / 5 of the unzip_LRU list,
-                                or nothing if n_iterations >= 5 */
+        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
+        ulint           n_iterations)   /*!< in: how many times this has
+                                        been called repeatedly without
+                                        result: a high value means that
+                                        we should search farther; we will
+                                        search n_iterations / 5 of the
+                                        unzip_LRU list, or nothing if
+                                        n_iterations >= 5 */
 {
         buf_block_t*    block;
         ulint           distance;
 
-        ut_ad(buf_pool_mutex_own());
+        ut_ad(buf_pool_mutex_own(buf_pool));
 
         /* Theoratically it should be much easier to find a victim
         from unzip_LRU as we can choose even a dirty block (as we'll

@@ -557,7 +585,7 @@
         if we have done five iterations so far. */
 
         if (UNIV_UNLIKELY(n_iterations >= 5)
-            || !buf_LRU_evict_from_unzip_LRU()) {
+            || !buf_LRU_evict_from_unzip_LRU(buf_pool)) {
 
                 return(FALSE);
         }

@@ -609,7 +637,9 @@
 ibool
 buf_LRU_free_from_common_LRU_list(
 /*==============================*/
-        ulint   n_iterations)   /*!< in: how many times this has been called
+        buf_pool_t*     buf_pool,
+        ulint           n_iterations)
+                                /*!< in: how many times this has been called
                                 repeatedly without result: a high value means
                                 that we should search farther; if
                                 n_iterations < 10, then we search

@@ -619,7 +649,7 @@
         buf_page_t*     bpage;
         ulint           distance;
 
-        ut_ad(buf_pool_mutex_own());
+        ut_ad(buf_pool_mutex_own(buf_pool));
 
         distance = 100 + (n_iterations * buf_pool->curr_size) / 10;
 

@@ -676,7 +706,10 @@
 ibool
 buf_LRU_search_and_free_block(
 /*==========================*/
-        ulint   n_iterations)   /*!< in: how many times this has been called
+        buf_pool_t*     buf_pool,
+                                /*!< in: buffer pool instance */
+        ulint           n_iterations)
+                                /*!< in: how many times this has been called
                                 repeatedly without result: a high value means
                                 that we should search farther; if
                                 n_iterations < 10, then we search

@@ -687,12 +720,13 @@
 {
         ibool   freed = FALSE;
 
-        buf_pool_mutex_enter();
+        buf_pool_mutex_enter(buf_pool);
 
-        freed = buf_LRU_free_from_unzip_LRU_list(n_iterations);
+        freed = buf_LRU_free_from_unzip_LRU_list(buf_pool, n_iterations);
 
         if (!freed) {
-                freed = buf_LRU_free_from_common_LRU_list(n_iterations);
+                freed = buf_LRU_free_from_common_LRU_list(
+                        buf_pool, n_iterations);
         }
 
         if (!freed) {

@@ -701,7 +735,7 @@
                 buf_pool->LRU_flush_ended--;
         }
 
-        buf_pool_mutex_exit();
+        buf_pool_mutex_exit(buf_pool);
 
         return(freed);
 }

@@ -716,45 +750,65 @@
 wasted. */
 UNIV_INTERN
 void
-buf_LRU_try_free_flushed_blocks(void)
-/*=================================*/
+buf_LRU_try_free_flushed_blocks(
+/*============================*/
+        buf_pool_t*     buf_pool)               /*!< in: buffer pool instance */
 {
-        buf_pool_mutex_enter();
-
-        while (buf_pool->LRU_flush_ended > 0) {
-
-                buf_pool_mutex_exit();
-
-                buf_LRU_search_and_free_block(1);
-
-                buf_pool_mutex_enter();
+
+        if (buf_pool == NULL) {
+                ulint   i;
+
+                for (i = 0; i < srv_buf_pool_instances; i++) {
+                        buf_pool = buf_pool_from_array(i);
+                        buf_LRU_try_free_flushed_blocks(buf_pool);
+                }
+        } else {
+                buf_pool_mutex_enter(buf_pool);
+
+                while (buf_pool->LRU_flush_ended > 0) {
+
+                        buf_pool_mutex_exit(buf_pool);
+
+                        buf_LRU_search_and_free_block(buf_pool, 1);
+
+                        buf_pool_mutex_enter(buf_pool);
+                }
+
+                buf_pool_mutex_exit(buf_pool);
         }
-
-        buf_pool_mutex_exit();
 }
 
 /******************************************************************//**
-Returns TRUE if less than 25 % of the buffer pool is available. This can be
-used in heuristics to prevent huge transactions eating up the whole buffer
-pool for their locks.
+Returns TRUE if less than 25 % of the buffer pool in any instance is
+available. This can be used in heuristics to prevent huge transactions
+eating up the whole buffer pool for their locks.
 @return TRUE if less than 25 % of buffer pool left */
 UNIV_INTERN
 ibool
 buf_LRU_buf_pool_running_out(void)
 /*==============================*/
 {
-        ibool   ret     = FALSE;
-
-        buf_pool_mutex_enter();
-
-        if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
-            + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 4) {
-
-                ret = TRUE;
+        ulint   i;
+        ibool   ret = FALSE;
+
+        for (i = 0; i < srv_buf_pool_instances && !ret; i++) {
+                buf_pool_t*     buf_pool;
+
+                buf_pool = buf_pool_from_array(i);
+
+                buf_pool_mutex_enter(buf_pool);
+
+                if (!recv_recovery_on
+                    && UT_LIST_GET_LEN(buf_pool->free)
+                       + UT_LIST_GET_LEN(buf_pool->LRU)
+                       < buf_pool->curr_size / 4) {
+
+                        ret = TRUE;
+                }
+
+                buf_pool_mutex_exit(buf_pool);
         }
 
-        buf_pool_mutex_exit();
-
         return(ret);
 }
 
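
Note the dispatch convention the new buf_LRU_try_free_flushed_blocks()
introduces: a NULL buf_pool argument means "all instances", handled by a
self-recursive loop over buf_pool_from_array(). Hypothetical call sites (not
part of this diff), just to show the two modes:

        /* sweep every buffer pool instance */
        buf_LRU_try_free_flushed_blocks(NULL);

        /* or only the instance that owns a given page */
        buf_LRU_try_free_flushed_blocks(buf_pool_from_bpage(bpage));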
818
@return a free control block, or NULL if the buf_block->free list is empty */
765
819
UNIV_INTERN
766
820
buf_block_t*
767
 
buf_LRU_get_free_only(void)
768
 
/*=======================*/
 
821
buf_LRU_get_free_only(
 
822
/*==================*/
 
823
        buf_pool_t*     buf_pool)
769
824
{
770
825
        buf_block_t*    block;
771
826
 
772
 
        ut_ad(buf_pool_mutex_own());
 
827
        ut_ad(buf_pool_mutex_own(buf_pool));
773
828
 
774
829
        block = (buf_block_t*) UT_LIST_GET_FIRST(buf_pool->free);
775
830
 
776
831
        if (block) {
 
832
 
777
833
                ut_ad(block->page.in_free_list);
778
834
                ut_d(block->page.in_free_list = FALSE);
779
835
                ut_ad(!block->page.in_flush_list);
786
842
                buf_block_set_state(block, BUF_BLOCK_READY_FOR_USE);
787
843
                UNIV_MEM_ALLOC(block->frame, UNIV_PAGE_SIZE);
788
844
 
 
845
                ut_ad(buf_pool_from_block(block) == buf_pool);
 
846
 
789
847
                mutex_exit(&block->mutex);
790
848
        }
791
849
 
801
859
buf_block_t*
802
860
buf_LRU_get_free_block(
803
861
/*===================*/
804
 
        ulint   zip_size)       /*!< in: compressed page size in bytes,
805
 
                                or 0 if uncompressed tablespace */
 
862
        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
 
863
        ulint           zip_size)       /*!< in: compressed page size in bytes,
 
864
                                        or 0 if uncompressed tablespace */
806
865
{
807
866
        buf_block_t*    block           = NULL;
808
867
        ibool           freed;
810
869
        ibool           mon_value_was   = FALSE;
811
870
        ibool           started_monitor = FALSE;
812
871
loop:
813
 
        buf_pool_mutex_enter();
 
872
        buf_pool_mutex_enter(buf_pool);
814
873
 
815
874
        if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
816
875
            + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 20) {
877
936
        }
878
937
 
879
938
        /* If there is a block in the free list, take it */
880
 
        block = buf_LRU_get_free_only();
 
939
        block = buf_LRU_get_free_only(buf_pool);
881
940
        if (block) {
882
941
 
 
942
                ut_ad(buf_pool_from_block(block) == buf_pool);
 
943
 
883
944
#ifdef UNIV_DEBUG
884
945
                block->page.zip.m_start =
885
946
#endif /* UNIV_DEBUG */
890
951
                if (UNIV_UNLIKELY(zip_size)) {
891
952
                        ibool   lru;
892
953
                        page_zip_set_size(&block->page.zip, zip_size);
893
 
                        block->page.zip.data = buf_buddy_alloc(zip_size, &lru);
 
954
 
 
955
                        block->page.zip.data = buf_buddy_alloc(
 
956
                                buf_pool, zip_size, &lru);
 
957
 
894
958
                        UNIV_MEM_DESC(block->page.zip.data, zip_size, block);
895
959
                } else {
896
960
                        page_zip_set_size(&block->page.zip, 0);
897
961
                        block->page.zip.data = NULL;
898
962
                }
899
963
 
900
 
                buf_pool_mutex_exit();
 
964
                buf_pool_mutex_exit(buf_pool);
901
965
 
902
966
                if (started_monitor) {
903
967
                        srv_print_innodb_monitor = mon_value_was;
909
973
        /* If no block was in the free list, search from the end of the LRU
910
974
        list and try to free a block there */
911
975
 
912
 
        buf_pool_mutex_exit();
 
976
        buf_pool_mutex_exit(buf_pool);
913
977
 
914
 
        freed = buf_LRU_search_and_free_block(n_iterations);
 
978
        freed = buf_LRU_search_and_free_block(buf_pool, n_iterations);
915
979
 
916
980
        if (freed > 0) {
917
981
                goto loop;
953
1017
 
954
1018
        /* No free block was found: try to flush the LRU list */
955
1019
 
956
 
        buf_flush_free_margin();
 
1020
        buf_flush_free_margin(buf_pool);
957
1021
        ++srv_buf_pool_wait_free;
958
1022
 
959
1023
        os_aio_simulated_wake_handler_threads();
960
1024
 
961
 
        buf_pool_mutex_enter();
 
1025
        buf_pool_mutex_enter(buf_pool);
962
1026
 
963
1027
        if (buf_pool->LRU_flush_ended > 0) {
964
1028
                /* We have written pages in an LRU flush. To make the insert
965
1029
                buffer more efficient, we try to move these pages to the free
966
1030
                list. */
967
1031
 
968
 
                buf_pool_mutex_exit();
 
1032
                buf_pool_mutex_exit(buf_pool);
969
1033
 
970
 
                buf_LRU_try_free_flushed_blocks();
 
1034
                buf_LRU_try_free_flushed_blocks(buf_pool);
971
1035
        } else {
972
 
                buf_pool_mutex_exit();
 
1036
                buf_pool_mutex_exit(buf_pool);
973
1037
        }
974
1038
 
975
1039
        if (n_iterations > 10) {
987
1051
is inside the allowed limits. */
988
1052
UNIV_INLINE
989
1053
void
990
 
buf_LRU_old_adjust_len(void)
991
 
/*========================*/
 
1054
buf_LRU_old_adjust_len(
 
1055
/*===================*/
 
1056
        buf_pool_t*     buf_pool)       /*!< in: buffer pool instance */
992
1057
{
993
1058
        ulint   old_len;
994
1059
        ulint   new_len;
995
1060
 
996
1061
        ut_a(buf_pool->LRU_old);
997
 
        ut_ad(buf_pool_mutex_own());
998
 
        ut_ad(buf_LRU_old_ratio >= BUF_LRU_OLD_RATIO_MIN);
999
 
        ut_ad(buf_LRU_old_ratio <= BUF_LRU_OLD_RATIO_MAX);
 
1062
        ut_ad(buf_pool_mutex_own(buf_pool));
 
1063
        ut_ad(buf_pool->LRU_old_ratio >= BUF_LRU_OLD_RATIO_MIN);
 
1064
        ut_ad(buf_pool->LRU_old_ratio <= BUF_LRU_OLD_RATIO_MAX);
1000
1065
#if BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5)
1001
1066
# error "BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5)"
1002
1067
#endif
1012
1077
 
1013
1078
        old_len = buf_pool->LRU_old_len;
1014
1079
        new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU)
1015
 
                         * buf_LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV,
 
1080
                         * buf_pool->LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV,
1016
1081
                         UT_LIST_GET_LEN(buf_pool->LRU)
1017
1082
                         - (BUF_LRU_OLD_TOLERANCE
1018
1083
                            + BUF_LRU_NON_OLD_MIN_LEN));
1054
1119
called when the LRU list grows to BUF_LRU_OLD_MIN_LEN length. */
1055
1120
static
1056
1121
void
1057
 
buf_LRU_old_init(void)
1058
 
/*==================*/
 
1122
buf_LRU_old_init(
 
1123
/*=============*/
 
1124
        buf_pool_t*     buf_pool)
1059
1125
{
1060
1126
        buf_page_t*     bpage;
1061
1127
 
1062
 
        ut_ad(buf_pool_mutex_own());
 
1128
        ut_ad(buf_pool_mutex_own(buf_pool));
1063
1129
        ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN);
1064
1130
 
1065
1131
        /* We first initialize all blocks in the LRU list as old and then use
1078
1144
        buf_pool->LRU_old = UT_LIST_GET_FIRST(buf_pool->LRU);
1079
1145
        buf_pool->LRU_old_len = UT_LIST_GET_LEN(buf_pool->LRU);
1080
1146
 
1081
 
        buf_LRU_old_adjust_len();
 
1147
        buf_LRU_old_adjust_len(buf_pool);
1082
1148
}
1083
1149
 
1084
1150
/******************************************************************//**
1089
1155
/*=================================*/
1090
1156
        buf_page_t*     bpage)  /*!< in/out: control block */
1091
1157
{
 
1158
        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
 
1159
 
1092
1160
        ut_ad(buf_pool);
1093
1161
        ut_ad(bpage);
1094
1162
        ut_ad(buf_page_in_file(bpage));
1095
 
        ut_ad(buf_pool_mutex_own());
 
1163
        ut_ad(buf_pool_mutex_own(buf_pool));
1096
1164
 
1097
1165
        if (buf_page_belongs_to_unzip_LRU(bpage)) {
1098
1166
                buf_block_t*    block = (buf_block_t*) bpage;
1112
1180
/*=================*/
1113
1181
        buf_page_t*     bpage)  /*!< in: control block */
1114
1182
{
 
1183
        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
 
1184
 
1115
1185
        ut_ad(buf_pool);
1116
1186
        ut_ad(bpage);
1117
 
        ut_ad(buf_pool_mutex_own());
 
1187
        ut_ad(buf_pool_mutex_own(buf_pool));
1118
1188
 
1119
1189
        ut_a(buf_page_in_file(bpage));
1120
1190
 
1128
1198
                /* Below: the previous block is guaranteed to exist,
1129
1199
                because the LRU_old pointer is only allowed to differ
1130
1200
                by BUF_LRU_OLD_TOLERANCE from strict
1131
 
                buf_LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV of the LRU
 
1201
                buf_pool->LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV of the LRU
1132
1202
                list length. */
1133
1203
                buf_page_t*     prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
1134
1204
 
1174
1244
        }
1175
1245
 
1176
1246
        /* Adjust the length of the old block list if necessary */
1177
 
        buf_LRU_old_adjust_len();
 
1247
        buf_LRU_old_adjust_len(buf_pool);
1178
1248
}
1179
1249
 
1180
1250
/******************************************************************//**
1187
1257
        ibool           old)    /*!< in: TRUE if should be put to the end
1188
1258
                                of the list, else put to the start */
1189
1259
{
 
1260
        buf_pool_t*     buf_pool = buf_pool_from_block(block);
 
1261
 
1190
1262
        ut_ad(buf_pool);
1191
1263
        ut_ad(block);
1192
 
        ut_ad(buf_pool_mutex_own());
 
1264
        ut_ad(buf_pool_mutex_own(buf_pool));
1193
1265
 
1194
1266
        ut_a(buf_page_belongs_to_unzip_LRU(&block->page));
1195
1267
 
1211
1283
/*=========================*/
1212
1284
        buf_page_t*     bpage)  /*!< in: control block */
1213
1285
{
 
1286
        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
 
1287
 
1214
1288
        ut_ad(buf_pool);
1215
1289
        ut_ad(bpage);
1216
 
        ut_ad(buf_pool_mutex_own());
 
1290
        ut_ad(buf_pool_mutex_own(buf_pool));
1217
1291
 
1218
1292
        ut_a(buf_page_in_file(bpage));
1219
1293
 
1229
1303
 
1230
1304
                buf_page_set_old(bpage, TRUE);
1231
1305
                buf_pool->LRU_old_len++;
1232
 
                buf_LRU_old_adjust_len();
 
1306
                buf_LRU_old_adjust_len(buf_pool);
1233
1307
 
1234
1308
        } else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) {
1235
1309
 
1236
1310
                /* The LRU list is now long enough for LRU_old to become
1237
1311
                defined: init it */
1238
1312
 
1239
 
                buf_LRU_old_init();
 
1313
                buf_LRU_old_init(buf_pool);
1240
1314
        } else {
1241
1315
                buf_page_set_old(bpage, buf_pool->LRU_old != NULL);
1242
1316
        }
1260
1334
                                LRU list is very short, the block is added to
1261
1335
                                the start, regardless of this parameter */
1262
1336
{
 
1337
        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
 
1338
 
1263
1339
        ut_ad(buf_pool);
1264
1340
        ut_ad(bpage);
1265
 
        ut_ad(buf_pool_mutex_own());
 
1341
        ut_ad(buf_pool_mutex_own(buf_pool));
1266
1342
 
1267
1343
        ut_a(buf_page_in_file(bpage));
1268
1344
        ut_ad(!bpage->in_LRU_list);
1296
1372
                /* Adjust the length of the old block list if necessary */
1297
1373
 
1298
1374
                buf_page_set_old(bpage, old);
1299
 
                buf_LRU_old_adjust_len();
 
1375
                buf_LRU_old_adjust_len(buf_pool);
1300
1376
 
1301
1377
        } else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) {
1302
1378
 
1303
1379
                /* The LRU list is now long enough for LRU_old to become
1304
1380
                defined: init it */
1305
1381
 
1306
 
                buf_LRU_old_init();
 
1382
                buf_LRU_old_init(buf_pool);
1307
1383
        } else {
1308
1384
                buf_page_set_old(bpage, buf_pool->LRU_old != NULL);
1309
1385
        }
1339
1415
/*=====================*/
1340
1416
        buf_page_t*     bpage)  /*!< in: control block */
1341
1417
{
1342
 
        ut_ad(buf_pool_mutex_own());
 
1418
        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
 
1419
 
 
1420
        ut_ad(buf_pool_mutex_own(buf_pool));
1343
1421
 
1344
1422
        if (bpage->old) {
1345
1423
                buf_pool->stat.n_pages_made_young++;
1365
1443
Try to free a block.  If bpage is a descriptor of a compressed-only
1366
1444
page, the descriptor object will be freed as well.
1367
1445
 
1368
 
NOTE: If this function returns BUF_LRU_FREED, it will temporarily
1369
 
release buf_pool_mutex.  Furthermore, the page frame will no longer be
 
1446
NOTE: If this function returns BUF_LRU_FREED, it will not temporarily
 
1447
release buf_pool->mutex.  Furthermore, the page frame will no longer be
1370
1448
accessible via bpage.
1371
1449
 
1372
 
The caller must hold buf_pool_mutex and buf_page_get_mutex(bpage) and
 
1450
The caller must hold buf_pool->mutex and buf_page_get_mutex(bpage) and
1373
1451
release these two mutexes after the call.  No other
1374
1452
buf_page_get_mutex() may be held when calling this function.
1375
1453
@return BUF_LRU_FREED if freed, BUF_LRU_CANNOT_RELOCATE or
1383
1461
                                compressed page of an uncompressed page */
1384
1462
        ibool*          buf_pool_mutex_released)
1385
1463
                                /*!< in: pointer to a variable that will
1386
 
                                be assigned TRUE if buf_pool_mutex
 
1464
                                be assigned TRUE if buf_pool->mutex
1387
1465
                                was temporarily released, or NULL */
1388
1466
{
1389
1467
        buf_page_t*     b = NULL;
 
1468
        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
1390
1469
        mutex_t*        block_mutex = buf_page_get_mutex(bpage);
1391
1470
 
1392
 
        ut_ad(buf_pool_mutex_own());
 
1471
        ut_ad(buf_pool_mutex_own(buf_pool));
1393
1472
        ut_ad(mutex_own(block_mutex));
1394
1473
        ut_ad(buf_page_in_file(bpage));
1395
1474
        ut_ad(bpage->in_LRU_list);
1396
1475
        ut_ad(!bpage->in_flush_list == !bpage->oldest_modification);
1397
 
#if UNIV_WORD_SIZE == 4
1398
 
        /* On 32-bit systems, there is no padding in buf_page_t.  On
1399
 
        other systems, Valgrind could complain about uninitialized pad
1400
 
        bytes. */
1401
1476
        UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage);
1402
 
#endif
1403
1477
 
1404
1478
        if (!buf_page_can_relocate(bpage)) {
1405
1479
 
1433
1507
                If it cannot be allocated (without freeing a block
1434
1508
                from the LRU list), refuse to free bpage. */
1435
1509
alloc:
1436
 
                buf_pool_mutex_exit_forbid();
1437
 
                b = buf_buddy_alloc(sizeof *b, NULL);
1438
 
                buf_pool_mutex_exit_allow();
 
1510
                buf_pool_mutex_exit_forbid(buf_pool);
 
1511
                b = buf_buddy_alloc(buf_pool, sizeof *b, NULL);
 
1512
                buf_pool_mutex_exit_allow(buf_pool);
1439
1513
 
1440
1514
                if (UNIV_UNLIKELY(!b)) {
1441
1515
                        return(BUF_LRU_CANNOT_RELOCATE);
1457
1531
                ut_a(bpage->buf_fix_count == 0);
1458
1532
 
1459
1533
                if (b) {
 
1534
                        buf_page_t*     hash_b;
1460
1535
                        buf_page_t*     prev_b  = UT_LIST_GET_PREV(LRU, b);
1461
 
                        const ulint     fold    = buf_page_address_fold(
 
1536
 
 
1537
                        const ulint     fold = buf_page_address_fold(
1462
1538
                                bpage->space, bpage->offset);
1463
1539
 
1464
 
                        ut_a(!buf_page_hash_get(bpage->space, bpage->offset));
 
1540
                        hash_b  = buf_page_hash_get_low(
 
1541
                                buf_pool, bpage->space, bpage->offset, fold);
 
1542
 
 
1543
                        ut_a(!hash_b);
1465
1544
 
1466
1545
                        b->state = b->oldest_modification
1467
1546
                                ? BUF_BLOCK_ZIP_DIRTY
1495
1574
 
1496
1575
                                ut_ad(prev_b->in_LRU_list);
1497
1576
                                ut_ad(buf_page_in_file(prev_b));
1498
 
#if UNIV_WORD_SIZE == 4
1499
 
                                /* On 32-bit systems, there is no
1500
 
                                padding in buf_page_t.  On other
1501
 
                                systems, Valgrind could complain about
1502
 
                                uninitialized pad bytes. */
1503
1577
                                UNIV_MEM_ASSERT_RW(prev_b, sizeof *prev_b);
1504
 
#endif
 
1578
 
1505
1579
                                UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU,
1506
1580
                                                     prev_b, b);
1507
1581
 
1521
1595
                                        ut_ad(buf_pool->LRU_old);
1522
1596
                                        /* Adjust the length of the
1523
1597
                                        old block list if necessary */
1524
 
                                        buf_LRU_old_adjust_len();
 
1598
                                        buf_LRU_old_adjust_len(buf_pool);
1525
1599
                                } else if (lru_len == BUF_LRU_OLD_MIN_LEN) {
1526
1600
                                        /* The LRU list is now long
1527
1601
                                        enough for LRU_old to become
1528
1602
                                        defined: init it */
1529
 
                                        buf_LRU_old_init();
 
1603
                                        buf_LRU_old_init(buf_pool);
1530
1604
                                }
1531
1605
#ifdef UNIV_LRU_DEBUG
1532
1606
                                /* Check that the "old" flag is consistent
1550
1624
 
1551
1625
                        /* Prevent buf_page_get_gen() from
1552
1626
                        decompressing the block while we release
1553
 
                        buf_pool_mutex and block_mutex. */
 
1627
                        buf_pool->mutex and block_mutex. */
1554
1628
                        b->buf_fix_count++;
1555
1629
                        b->io_fix = BUF_IO_READ;
1556
1630
                }
1559
1633
                        *buf_pool_mutex_released = TRUE;
1560
1634
                }
1561
1635
 
1562
 
                buf_pool_mutex_exit();
 
1636
                buf_pool_mutex_exit(buf_pool);
1563
1637
                mutex_exit(block_mutex);
1564
1638
 
1565
1639
                /* Remove possible adaptive hash index on the page.
1591
1665
                                : BUF_NO_CHECKSUM_MAGIC);
1592
1666
                }
1593
1667
 
1594
 
                buf_pool_mutex_enter();
 
1668
                buf_pool_mutex_enter(buf_pool);
1595
1669
                mutex_enter(block_mutex);
1596
1670
 
1597
1671
                if (b) {
1598
 
                        mutex_enter(&buf_pool_zip_mutex);
 
1672
                        mutex_enter(&buf_pool->zip_mutex);
1599
1673
                        b->buf_fix_count--;
1600
1674
                        buf_page_set_io_fix(b, BUF_IO_NONE);
1601
 
                        mutex_exit(&buf_pool_zip_mutex);
 
1675
                        mutex_exit(&buf_pool->zip_mutex);
1602
1676
                }
1603
1677
 
1604
1678
                buf_LRU_block_free_hashed_page((buf_block_t*) bpage);
1606
1680
                /* The block_mutex should have been released by
1607
1681
                buf_LRU_block_remove_hashed_page() when it returns
1608
1682
                BUF_BLOCK_ZIP_FREE. */
1609
 
                ut_ad(block_mutex == &buf_pool_zip_mutex);
 
1683
                ut_ad(block_mutex == &buf_pool->zip_mutex);
1610
1684
                mutex_enter(block_mutex);
1611
1685
        }
1612
1686
 
1621
1695
/*=============================*/
1622
1696
        buf_block_t*    block)  /*!< in: block, must not contain a file page */
1623
1697
{
1624
 
        void*   data;
 
1698
        void*           data;
 
1699
        buf_pool_t*     buf_pool = buf_pool_from_block(block);
1625
1700
 
1626
1701
        ut_ad(block);
1627
 
        ut_ad(buf_pool_mutex_own());
 
1702
        ut_ad(buf_pool_mutex_own(buf_pool));
1628
1703
        ut_ad(mutex_own(&block->mutex));
1629
1704
 
1630
1705
        switch (buf_block_get_state(block)) {
1658
1733
        if (data) {
1659
1734
                block->page.zip.data = NULL;
1660
1735
                mutex_exit(&block->mutex);
1661
 
                buf_pool_mutex_exit_forbid();
1662
 
                buf_buddy_free(data, page_zip_get_size(&block->page.zip));
1663
 
                buf_pool_mutex_exit_allow();
 
1736
                buf_pool_mutex_exit_forbid(buf_pool);
 
1737
 
 
1738
                buf_buddy_free(
 
1739
                        buf_pool, data, page_zip_get_size(&block->page.zip));
 
1740
 
 
1741
                buf_pool_mutex_exit_allow(buf_pool);
1664
1742
                mutex_enter(&block->mutex);
1665
1743
                page_zip_set_size(&block->page.zip, 0);
1666
1744
        }
1674
1752
/******************************************************************//**
1675
1753
Takes a block out of the LRU list and page hash table.
1676
1754
If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
1677
 
the object will be freed and buf_pool_zip_mutex will be released.
 
1755
the object will be freed and buf_pool->zip_mutex will be released.
1678
1756
 
1679
1757
If a compressed page or a compressed-only block descriptor is freed,
1680
1758
other compressed pages or compressed-only block descriptors may be
1691
1769
        ibool           zip)    /*!< in: TRUE if should remove also the
1692
1770
                                compressed page of an uncompressed page */
1693
1771
{
 
1772
        ulint                   fold;
1694
1773
        const buf_page_t*       hashed_bpage;
 
1774
        buf_pool_t*             buf_pool = buf_pool_from_bpage(bpage);
 
1775
 
1695
1776
        ut_ad(bpage);
1696
 
        ut_ad(buf_pool_mutex_own());
 
1777
        ut_ad(buf_pool_mutex_own(buf_pool));
1697
1778
        ut_ad(mutex_own(buf_page_get_mutex(bpage)));
1698
1779
 
1699
1780
        ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
1700
1781
        ut_a(bpage->buf_fix_count == 0);
1701
1782
 
1702
 
#if UNIV_WORD_SIZE == 4
1703
 
        /* On 32-bit systems, there is no padding in
1704
 
        buf_page_t.  On other systems, Valgrind could complain
1705
 
        about uninitialized pad bytes. */
1706
1783
        UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage);
1707
 
#endif
1708
1784
 
1709
1785
        buf_LRU_remove_block(bpage);
1710
1786
 
1779
1855
                break;
1780
1856
        }
1781
1857
 
1782
 
        hashed_bpage = buf_page_hash_get(bpage->space, bpage->offset);
 
1858
        fold = buf_page_address_fold(bpage->space, bpage->offset);
 
1859
        hashed_bpage = buf_page_hash_get_low(
 
1860
                buf_pool, bpage->space, bpage->offset, fold);
1783
1861
 
1784
1862
        if (UNIV_UNLIKELY(bpage != hashed_bpage)) {
1785
1863
                fprintf(stderr,
1799
1877
 
1800
1878
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
1801
1879
                mutex_exit(buf_page_get_mutex(bpage));
1802
 
                buf_pool_mutex_exit();
 
1880
                buf_pool_mutex_exit(buf_pool);
1803
1881
                buf_print();
1804
1882
                buf_LRU_print();
1805
1883
                buf_validate();
1811
1889
        ut_ad(!bpage->in_zip_hash);
1812
1890
        ut_ad(bpage->in_page_hash);
1813
1891
        ut_d(bpage->in_page_hash = FALSE);
1814
 
        HASH_DELETE(buf_page_t, hash, buf_pool->page_hash,
1815
 
                    buf_page_address_fold(bpage->space, bpage->offset),
1816
 
                    bpage);
 
1892
        HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, bpage);
1817
1893
        switch (buf_page_get_state(bpage)) {
1818
1894
        case BUF_BLOCK_ZIP_PAGE:
1819
1895
                ut_ad(!bpage->in_free_list);
1824
1900
 
1825
1901
                UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);
1826
1902
 
1827
 
                mutex_exit(&buf_pool_zip_mutex);
1828
 
                buf_pool_mutex_exit_forbid();
1829
 
                buf_buddy_free(bpage->zip.data,
1830
 
                               page_zip_get_size(&bpage->zip));
1831
 
                buf_buddy_free(bpage, sizeof(*bpage));
1832
 
                buf_pool_mutex_exit_allow();
 
1903
                mutex_exit(&buf_pool->zip_mutex);
 
1904
                buf_pool_mutex_exit_forbid(buf_pool);
 
1905
 
 
1906
                buf_buddy_free(
 
1907
                        buf_pool, bpage->zip.data,
 
1908
                        page_zip_get_size(&bpage->zip));
 
1909
 
 
1910
                buf_buddy_free(buf_pool, bpage, sizeof(*bpage));
 
1911
                buf_pool_mutex_exit_allow(buf_pool);
 
1912
 
1833
1913
                UNIV_MEM_UNDESC(bpage);
1834
1914
                return(BUF_BLOCK_ZIP_FREE);
1835
1915
 
1851
1931
                        ut_ad(!bpage->in_flush_list);
1852
1932
                        ut_ad(!bpage->in_LRU_list);
1853
1933
                        mutex_exit(&((buf_block_t*) bpage)->mutex);
1854
 
                        buf_pool_mutex_exit_forbid();
1855
 
                        buf_buddy_free(data, page_zip_get_size(&bpage->zip));
1856
 
                        buf_pool_mutex_exit_allow();
 
1934
                        buf_pool_mutex_exit_forbid(buf_pool);
 
1935
 
 
1936
                        buf_buddy_free(
 
1937
                                buf_pool, data,
 
1938
                                page_zip_get_size(&bpage->zip));
 
1939
 
 
1940
                        buf_pool_mutex_exit_allow(buf_pool);
1857
1941
                        mutex_enter(&((buf_block_t*) bpage)->mutex);
1858
1942
                        page_zip_set_size(&bpage->zip, 0);
1859
1943
                }
1882
1966
        buf_block_t*    block)  /*!< in: block, must contain a file page and
1883
1967
                                be in a state where it can be freed */
1884
1968
{
1885
 
        ut_ad(buf_pool_mutex_own());
 
1969
#ifdef UNIV_DEBUG
 
1970
        buf_pool_t*     buf_pool = buf_pool_from_block(block);
 
1971
        ut_ad(buf_pool_mutex_own(buf_pool));
 
1972
#endif
1886
1973
        ut_ad(mutex_own(&block->mutex));
1887
1974
 
1888
1975
        buf_block_set_state(block, BUF_BLOCK_MEMORY);
1891
1978
}
1892
1979
 
1893
1980
/**********************************************************************//**
1894
 
Updates buf_LRU_old_ratio.
 
1981
Updates buf_pool->LRU_old_ratio for one buffer pool instance.
1895
1982
@return updated old_pct */
1896
 
UNIV_INTERN
 
1983
static
1897
1984
uint
1898
 
buf_LRU_old_ratio_update(
1899
 
/*=====================*/
1900
 
        uint    old_pct,/*!< in: Reserve this percentage of
1901
 
                        the buffer pool for "old" blocks. */
1902
 
        ibool   adjust) /*!< in: TRUE=adjust the LRU list;
1903
 
                        FALSE=just assign buf_LRU_old_ratio
1904
 
                        during the initialization of InnoDB */
 
1985
buf_LRU_old_ratio_update_instance(
 
1986
/*==============================*/
 
1987
        buf_pool_t*     buf_pool,/*!< in: buffer pool instance */
 
1988
        uint            old_pct,/*!< in: Reserve this percentage of
 
1989
                                the buffer pool for "old" blocks. */
 
1990
        ibool           adjust) /*!< in: TRUE=adjust the LRU list;
 
1991
                                FALSE=just assign buf_pool->LRU_old_ratio
 
1992
                                during the initialization of InnoDB */
1905
1993
{
1906
1994
        uint    ratio;
1907
1995
 
1913
2001
        }
1914
2002
 
1915
2003
        if (adjust) {
1916
 
                buf_pool_mutex_enter();
 
2004
                buf_pool_mutex_enter(buf_pool);
1917
2005
 
1918
 
                if (ratio != buf_LRU_old_ratio) {
1919
 
                        buf_LRU_old_ratio = ratio;
 
2006
                if (ratio != buf_pool->LRU_old_ratio) {
 
2007
                        buf_pool->LRU_old_ratio = ratio;
1920
2008
 
1921
2009
                        if (UT_LIST_GET_LEN(buf_pool->LRU)
1922
 
                            >= BUF_LRU_OLD_MIN_LEN) {
1923
 
                                buf_LRU_old_adjust_len();
 
2010
                           >= BUF_LRU_OLD_MIN_LEN) {
 
2011
 
 
2012
                                buf_LRU_old_adjust_len(buf_pool);
1924
2013
                        }
1925
2014
                }
1926
2015
 
1927
 
                buf_pool_mutex_exit();
 
2016
                buf_pool_mutex_exit(buf_pool);
1928
2017
        } else {
1929
 
                buf_LRU_old_ratio = ratio;
 
2018
                buf_pool->LRU_old_ratio = ratio;
1930
2019
        }
1931
 
 
1932
2020
        /* the reverse of 
1933
2021
        ratio = old_pct * BUF_LRU_OLD_RATIO_DIV / 100 */
1934
2022
        return((uint) (ratio * 100 / (double) BUF_LRU_OLD_RATIO_DIV + 0.5));
1935
2023
}
1936
2024
 
 
2025
/**********************************************************************//**
 
2026
Updates buf_pool->LRU_old_ratio.
 
2027
@return updated old_pct */
 
2028
UNIV_INTERN
 
2029
ulint
 
2030
buf_LRU_old_ratio_update(
 
2031
/*=====================*/
 
2032
        uint    old_pct,/*!< in: Reserve this percentage of
 
2033
                        the buffer pool for "old" blocks. */
 
2034
        ibool   adjust) /*!< in: TRUE=adjust the LRU list;
 
2035
                        FALSE=just assign buf_pool->LRU_old_ratio
 
2036
                        during the initialization of InnoDB */
 
2037
{
 
2038
        ulint   i;
 
2039
        ulint   new_ratio = 0;
 
2040
 
 
2041
        for (i = 0; i < srv_buf_pool_instances; i++) {
 
2042
                buf_pool_t*     buf_pool;
 
2043
 
 
2044
                buf_pool = buf_pool_from_array(i);
 
2045
 
 
2046
                new_ratio = buf_LRU_old_ratio_update_instance(
 
2047
                        buf_pool, old_pct, adjust);
 
2048
        }
 
2049
 
 
2050
        return(new_ratio);
 
2051
}
 
2052
 
1937
2053
/********************************************************************//**
1938
2054
Update the historical stats that we are collecting for LRU eviction
1939
2055
policy at the end of each interval. */
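
The percentage-to-ratio conversion in buf_LRU_old_ratio_update_instance()
round-trips cleanly. Assuming BUF_LRU_OLD_RATIO_DIV is 1024 (an assumption
about this codebase, not visible in the diff), for old_pct = 37:

        ratio   = 37 * 1024 / 100                   = 378   (stored)
        old_pct = (uint)(378 * 100 / 1024.0 + 0.5)  = 37    (returned)

The + 0.5 rounds to nearest, so the value reported back to the caller matches
the percentage that was set, despite the integer truncation on the way in.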

@@ -1942,15 +2058,26 @@
 buf_LRU_stat_update(void)
 /*=====================*/
 {
+        ulint           i;
         buf_LRU_stat_t* item;
+        buf_pool_t*     buf_pool;
+        ibool           evict_started = FALSE;
 
         /* If we haven't started eviction yet then don't update stats. */
-        if (buf_pool->freed_page_clock == 0) {
+        for (i = 0; i < srv_buf_pool_instances; i++) {
+
+                buf_pool = buf_pool_from_array(i);
+
+                if (buf_pool->freed_page_clock != 0) {
+                        evict_started = TRUE;
+                        break;
+                }
+        }
+
+        if (!evict_started) {
                 goto func_exit;
         }
 
-        buf_pool_mutex_enter();
-
         /* Update the index. */
         item = &buf_LRU_stat_arr[buf_LRU_stat_arr_ind];
         buf_LRU_stat_arr_ind++;

@@ -1963,8 +2090,6 @@
         /* Put current entry in the array. */
         memcpy(item, &buf_LRU_stat_cur, sizeof *item);
 
-        buf_pool_mutex_exit();
-
 func_exit:
         /* Clear the current entry. */
         memset(&buf_LRU_stat_cur, 0, sizeof buf_LRU_stat_cur);

@@ -1972,12 +2097,12 @@
 
 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 /**********************************************************************//**
-Validates the LRU list.
-@return TRUE */
-UNIV_INTERN
-ibool
-buf_LRU_validate(void)
-/*==================*/
+Validates the LRU list for one buffer pool instance. */
+static
+void
+buf_LRU_validate_instance(
+/*======================*/
+        buf_pool_t*     buf_pool)
 {
         buf_page_t*     bpage;
         buf_block_t*    block;

@@ -1985,14 +2110,15 @@
         ulint           new_len;
 
         ut_ad(buf_pool);
-        buf_pool_mutex_enter();
+        buf_pool_mutex_enter(buf_pool);
 
         if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) {
 
                 ut_a(buf_pool->LRU_old);
                 old_len = buf_pool->LRU_old_len;
                 new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU)
-                                 * buf_LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV,
+                                 * buf_pool->LRU_old_ratio
+                                 / BUF_LRU_OLD_RATIO_DIV,
                                  UT_LIST_GET_LEN(buf_pool->LRU)
                                  - (BUF_LRU_OLD_TOLERANCE
                                     + BUF_LRU_NON_OLD_MIN_LEN));

@@ -2068,28 +2194,49 @@
                 ut_a(buf_page_belongs_to_unzip_LRU(&block->page));
         }
 
-        buf_pool_mutex_exit();
+        buf_pool_mutex_exit(buf_pool);
+}
+
+/**********************************************************************//**
+Validates the LRU list.
+@return TRUE */
+UNIV_INTERN
+ibool
+buf_LRU_validate(void)
+/*==================*/
+{
+        ulint   i;
+
+        for (i = 0; i < srv_buf_pool_instances; i++) {
+                buf_pool_t*     buf_pool;
+
+                buf_pool = buf_pool_from_array(i);
+                buf_LRU_validate_instance(buf_pool);
+        }
+
         return(TRUE);
 }
 #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
 
 #if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
 /**********************************************************************//**
-Prints the LRU list. */
+Prints the LRU list for one buffer pool instance. */
 UNIV_INTERN
 void
-buf_LRU_print(void)
-/*===============*/
+buf_LRU_print_instance(
+/*===================*/
+        buf_pool_t*     buf_pool)
 {
         const buf_page_t*       bpage;
 
         ut_ad(buf_pool);
-        buf_pool_mutex_enter();
+        buf_pool_mutex_enter(buf_pool);
 
         bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
 
         while (bpage != NULL) {
 
+                mutex_enter(buf_page_get_mutex(bpage));
                 fprintf(stderr, "BLOCK space %lu page %lu ",
                         (ulong) buf_page_get_space(bpage),
                         (ulong) buf_page_get_page_no(bpage));
2285
                        break;
2139
2286
                }
2140
2287
 
 
2288
                mutex_exit(buf_page_get_mutex(bpage));
2141
2289
                bpage = UT_LIST_GET_NEXT(LRU, bpage);
2142
2290
        }
2143
2291
 
2144
 
        buf_pool_mutex_exit();
 
2292
        buf_pool_mutex_exit(buf_pool);
 
2293
}
 
2294
 
 
2295
/**********************************************************************//**
 
2296
Prints the LRU list. */
 
2297
UNIV_INTERN
 
2298
void
 
2299
buf_LRU_print(void)
 
2300
/*===============*/
 
2301
{
 
2302
        ulint           i;
 
2303
        buf_pool_t*     buf_pool;
 
2304
 
 
2305
        for (i = 0; i < srv_buf_pool_instances; i++) {
 
2306
                buf_pool = buf_pool_from_array(i);
 
2307
                buf_LRU_print_instance(buf_pool);
 
2308
        }
2145
2309
}
2146
2310
#endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */