~drizzle-trunk/drizzle/development

« back to all changes in this revision

Viewing changes to plugin/innobase/buf/buf0lru.c

  • Committer: Brian Aker
  • Date: 2010-10-28 17:12:01 UTC
  • mfrom: (1887.1.3 merge)
  • Revision ID: brian@tangent.org-20101028171201-baj6l1bnntn1s4ad
Merge in POTFILES changes.

Show diffs side-by-side

added

removed

Lines of Context:
1
1
/*****************************************************************************
2
2
 
3
 
Copyright (C) 1995, 2010, Innobase Oy. All Rights Reserved.
 
3
Copyright (c) 1995, 2009, Innobase Oy. All Rights Reserved.
4
4
 
5
5
This program is free software; you can redistribute it and/or modify it under
6
6
the terms of the GNU General Public License as published by the Free Software
51
51
#include "srv0srv.h"
52
52
 
53
53
/** The number of blocks from the LRU_old pointer onward, including
54
 
the block pointed to, must be buf_pool->LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV
 
54
the block pointed to, must be buf_LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV
55
55
of the whole LRU list length, except that the tolerance defined below
56
56
is allowed. Note that the tolerance must be small enough such that for
57
57
even the BUF_LRU_OLD_MIN_LEN long LRU list, the LRU_old pointer is not
97
97
#define BUF_LRU_IO_TO_UNZIP_FACTOR 50
98
98
 
99
99
/** Sampled values buf_LRU_stat_cur.
100
 
Not protected by any mutex.  Updated by buf_LRU_stat_update(). */
 
100
Protected by buf_pool_mutex.  Updated by buf_LRU_stat_update(). */
101
101
static buf_LRU_stat_t           buf_LRU_stat_arr[BUF_LRU_STAT_N_INTERVAL];
102
 
 
103
102
/** Cursor to buf_LRU_stat_arr[] that is updated in a round-robin fashion. */
104
103
static ulint                    buf_LRU_stat_arr_ind;
105
104
 
108
107
UNIV_INTERN buf_LRU_stat_t      buf_LRU_stat_cur;
109
108
 
110
109
/** Running sum of past values of buf_LRU_stat_cur.
111
 
Updated by buf_LRU_stat_update().  Not Protected by any mutex. */
 
110
Updated by buf_LRU_stat_update().  Protected by buf_pool_mutex. */
112
111
UNIV_INTERN buf_LRU_stat_t      buf_LRU_stat_sum;
113
112
 
114
113
/* @} */
115
114
 
116
115
/** @name Heuristics for detecting index scan @{ */
 
116
/** Reserve this much/BUF_LRU_OLD_RATIO_DIV of the buffer pool for
 
117
"old" blocks.  Protected by buf_pool_mutex. */
 
118
UNIV_INTERN uint        buf_LRU_old_ratio;
117
119
/** Move blocks to "new" LRU list only if the first access was at
118
120
least this many milliseconds ago.  Not protected by any mutex or latch. */
119
121
UNIV_INTERN uint        buf_LRU_old_threshold_ms;
122
124
/******************************************************************//**
123
125
Takes a block out of the LRU list and page hash table.
124
126
If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
125
 
the object will be freed and buf_pool->zip_mutex will be released.
 
127
the object will be freed and buf_pool_zip_mutex will be released.
126
128
 
127
129
If a compressed page or a compressed-only block descriptor is freed,
128
130
other compressed pages or compressed-only block descriptors may be
153
155
@return TRUE if should use unzip_LRU */
154
156
UNIV_INLINE
155
157
ibool
156
 
buf_LRU_evict_from_unzip_LRU(
157
 
/*=========================*/
158
 
        buf_pool_t*     buf_pool)
 
158
buf_LRU_evict_from_unzip_LRU(void)
 
159
/*==============================*/
159
160
{
160
161
        ulint   io_avg;
161
162
        ulint   unzip_avg;
162
163
 
163
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
164
        ut_ad(buf_pool_mutex_own());
164
165
 
165
166
        /* If the unzip_LRU list is empty, we can only use the LRU. */
166
167
        if (UT_LIST_GET_LEN(buf_pool->unzip_LRU) == 0) {
228
229
void
229
230
buf_LRU_drop_page_hash_for_tablespace(
230
231
/*==================================*/
231
 
        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
232
 
        ulint           id)             /*!< in: space id */
 
232
        ulint   id)     /*!< in: space id */
233
233
{
234
234
        buf_page_t*     bpage;
235
235
        ulint*          page_arr;
244
244
                return;
245
245
        }
246
246
 
247
 
        page_arr = static_cast<unsigned long *>(ut_malloc(
248
 
                sizeof(ulint) * BUF_LRU_DROP_SEARCH_HASH_SIZE));
249
 
 
250
 
        buf_pool_mutex_enter(buf_pool);
 
247
        page_arr = ut_malloc(sizeof(ulint)
 
248
                             * BUF_LRU_DROP_SEARCH_HASH_SIZE);
 
249
        buf_pool_mutex_enter();
251
250
 
252
251
scan_again:
253
252
        num_entries = 0;
285
284
                        if (num_entries < BUF_LRU_DROP_SEARCH_HASH_SIZE) {
286
285
                                goto next_page;
287
286
                        }
288
 
 
289
 
                        /* Array full. We release the buf_pool->mutex to
 
287
                        /* Array full. We release the buf_pool_mutex to
290
288
                        obey the latching order. */
291
 
                        buf_pool_mutex_exit(buf_pool);
292
 
 
293
 
                        buf_LRU_drop_page_hash_batch(
294
 
                                id, zip_size, page_arr, num_entries);
295
 
 
 
289
                        buf_pool_mutex_exit();
 
290
 
 
291
                        buf_LRU_drop_page_hash_batch(id, zip_size, page_arr,
 
292
                                                     num_entries);
296
293
                        num_entries = 0;
297
 
 
298
 
                        buf_pool_mutex_enter(buf_pool);
 
294
                        buf_pool_mutex_enter();
299
295
                } else {
300
296
                        mutex_exit(block_mutex);
301
297
                }
320
316
                }
321
317
        }
322
318
 
323
 
        buf_pool_mutex_exit(buf_pool);
 
319
        buf_pool_mutex_exit();
324
320
 
325
321
        /* Drop any remaining batch of search hashed pages. */
326
322
        buf_LRU_drop_page_hash_batch(id, zip_size, page_arr, num_entries);
328
324
}
329
325
 
330
326
/******************************************************************//**
331
 
Invalidates all pages belonging to a given tablespace inside a specific
332
 
buffer pool instance when we are deleting the data file(s) of that
333
 
tablespace. */
334
 
static
 
327
Invalidates all pages belonging to a given tablespace when we are deleting
 
328
the data file(s) of that tablespace. */
 
329
UNIV_INTERN
335
330
void
336
 
buf_LRU_invalidate_tablespace_buf_pool_instance(
337
 
/*============================================*/
338
 
        buf_pool_t*     buf_pool,       /*!< buffer pool instance */
339
 
        ulint           id)             /*!< in: space id */
 
331
buf_LRU_invalidate_tablespace(
 
332
/*==========================*/
 
333
        ulint   id)     /*!< in: space id */
340
334
{
341
335
        buf_page_t*     bpage;
342
336
        ibool           all_freed;
343
337
 
 
338
        /* Before we attempt to drop pages one by one we first
 
339
        attempt to drop page hash index entries in batches to make
 
340
        it more efficient. The batching attempt is a best effort
 
341
        attempt and does not guarantee that all pages hash entries
 
342
        will be dropped. We get rid of remaining page hash entries
 
343
        one by one below. */
 
344
        buf_LRU_drop_page_hash_for_tablespace(id);
 
345
 
344
346
scan_again:
345
 
        buf_pool_mutex_enter(buf_pool);
 
347
        buf_pool_mutex_enter();
346
348
 
347
349
        all_freed = TRUE;
348
350
 
349
351
        bpage = UT_LIST_GET_LAST(buf_pool->LRU);
350
352
 
351
353
        while (bpage != NULL) {
 
354
                mutex_t*        block_mutex = buf_page_get_mutex(bpage);
352
355
                buf_page_t*     prev_bpage;
353
 
                ibool           prev_bpage_buf_fix = FALSE;
354
356
 
355
357
                ut_a(buf_page_in_file(bpage));
356
358
 
 
359
                mutex_enter(block_mutex);
357
360
                prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
358
361
 
359
 
                /* bpage->space and bpage->io_fix are protected by
360
 
                buf_pool->mutex and block_mutex.  It is safe to check
361
 
                them while holding buf_pool->mutex only. */
362
 
 
363
 
                if (buf_page_get_space(bpage) != id) {
364
 
                        /* Skip this block, as it does not belong to
365
 
                        the space that is being invalidated. */
366
 
                } else if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
367
 
                        /* We cannot remove this page during this scan
368
 
                        yet; maybe the system is currently reading it
369
 
                        in, or flushing the modifications to the file */
370
 
 
371
 
                        all_freed = FALSE;
372
 
                } else {
373
 
                        mutex_t* block_mutex = buf_page_get_mutex(bpage);
374
 
                        mutex_enter(block_mutex);
375
 
 
376
 
                        if (bpage->buf_fix_count > 0) {
 
362
                if (buf_page_get_space(bpage) == id) {
 
363
                        if (bpage->buf_fix_count > 0
 
364
                            || buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
377
365
 
378
366
                                /* We cannot remove this page during
379
367
                                this scan yet; maybe the system is
393
381
                                        (ulong) buf_page_get_page_no(bpage));
394
382
                        }
395
383
#endif
396
 
                        if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {
397
 
                                /* This is a compressed-only block
398
 
                                descriptor.  Ensure that prev_bpage
399
 
                                cannot be relocated when bpage is freed. */
400
 
                                if (UNIV_LIKELY(prev_bpage != NULL)) {
401
 
                                        switch (buf_page_get_state(
402
 
                                                        prev_bpage)) {
403
 
                                        case BUF_BLOCK_FILE_PAGE:
404
 
                                                /* Descriptors of uncompressed
405
 
                                                blocks will not be relocated,
406
 
                                                because we are holding the
407
 
                                                buf_pool->mutex. */
408
 
                                                break;
409
 
                                        case BUF_BLOCK_ZIP_PAGE:
410
 
                                        case BUF_BLOCK_ZIP_DIRTY:
411
 
                                                /* Descriptors of compressed-
412
 
                                                only blocks can be relocated,
413
 
                                                unless they are buffer-fixed.
414
 
                                                Because both bpage and
415
 
                                                prev_bpage are protected by
416
 
                                                buf_pool_zip_mutex, it is
417
 
                                                not necessary to acquire
418
 
                                                further mutexes. */
419
 
                                                ut_ad(&buf_pool->zip_mutex
420
 
                                                      == block_mutex);
421
 
                                                ut_ad(mutex_own(block_mutex));
422
 
                                                prev_bpage_buf_fix = TRUE;
423
 
                                                prev_bpage->buf_fix_count++;
424
 
                                                break;
425
 
                                        default:
426
 
                                                ut_error;
427
 
                                        }
428
 
                                }
429
 
                        } else if (((buf_block_t*) bpage)->is_hashed) {
 
384
                        if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE
 
385
                            && ((buf_block_t*) bpage)->is_hashed) {
430
386
                                ulint   page_no;
431
387
                                ulint   zip_size;
432
388
 
433
 
                                buf_pool_mutex_exit(buf_pool);
 
389
                                buf_pool_mutex_exit();
434
390
 
435
391
                                zip_size = buf_page_get_zip_size(bpage);
436
392
                                page_no = buf_page_get_page_no(bpage);
450
406
                                buf_flush_remove(bpage);
451
407
                        }
452
408
 
453
 
                        /* Remove from the LRU list. */
454
 
 
 
409
                        /* Remove from the LRU list */
455
410
                        if (buf_LRU_block_remove_hashed_page(bpage, TRUE)
456
411
                            != BUF_BLOCK_ZIP_FREE) {
457
412
                                buf_LRU_block_free_hashed_page((buf_block_t*)
460
415
                                /* The block_mutex should have been
461
416
                                released by buf_LRU_block_remove_hashed_page()
462
417
                                when it returns BUF_BLOCK_ZIP_FREE. */
463
 
                                ut_ad(block_mutex == &buf_pool->zip_mutex);
 
418
                                ut_ad(block_mutex == &buf_pool_zip_mutex);
464
419
                                ut_ad(!mutex_own(block_mutex));
465
420
 
466
 
                                if (prev_bpage_buf_fix) {
467
 
                                        /* We temporarily buffer-fixed
468
 
                                        prev_bpage, so that
469
 
                                        buf_buddy_free() could not
470
 
                                        relocate it, in case it was a
471
 
                                        compressed-only block
472
 
                                        descriptor. */
473
 
 
474
 
                                        mutex_enter(block_mutex);
475
 
                                        ut_ad(prev_bpage->buf_fix_count > 0);
476
 
                                        prev_bpage->buf_fix_count--;
477
 
                                        mutex_exit(block_mutex);
478
 
                                }
479
 
 
480
 
                                goto next_page_no_mutex;
 
421
                                /* The compressed block descriptor
 
422
                                (bpage) has been deallocated and
 
423
                                block_mutex released.  Also,
 
424
                                buf_buddy_free() may have relocated
 
425
                                prev_bpage.  Rescan the LRU list. */
 
426
 
 
427
                                bpage = UT_LIST_GET_LAST(buf_pool->LRU);
 
428
                                continue;
481
429
                        }
 
430
                }
482
431
next_page:
483
 
                        mutex_exit(block_mutex);
484
 
                }
485
 
 
486
 
next_page_no_mutex:
 
432
                mutex_exit(block_mutex);
487
433
                bpage = prev_bpage;
488
434
        }
489
435
 
490
 
        buf_pool_mutex_exit(buf_pool);
 
436
        buf_pool_mutex_exit();
491
437
 
492
438
        if (!all_freed) {
493
439
                os_thread_sleep(20000);
496
442
        }
497
443
}
498
444
 
499
 
/******************************************************************//**
500
 
Invalidates all pages belonging to a given tablespace when we are deleting
501
 
the data file(s) of that tablespace. */
502
 
UNIV_INTERN
503
 
void
504
 
buf_LRU_invalidate_tablespace(
505
 
/*==========================*/
506
 
        ulint   id)     /*!< in: space id */
507
 
{
508
 
        ulint   i;
509
 
 
510
 
        /* Before we attempt to drop pages one by one we first
511
 
        attempt to drop page hash index entries in batches to make
512
 
        it more efficient. The batching attempt is a best effort
513
 
        attempt and does not guarantee that all pages hash entries
514
 
        will be dropped. We get rid of remaining page hash entries
515
 
        one by one below. */
516
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
517
 
                buf_pool_t*     buf_pool;
518
 
 
519
 
                buf_pool = buf_pool_from_array(i);
520
 
                buf_LRU_drop_page_hash_for_tablespace(buf_pool, id);
521
 
                buf_LRU_invalidate_tablespace_buf_pool_instance(buf_pool, id);
522
 
        }
523
 
}
524
 
 
525
445
/********************************************************************//**
526
446
Insert a compressed block into buf_pool->zip_clean in the LRU order. */
527
447
UNIV_INTERN
531
451
        buf_page_t*     bpage)  /*!< in: pointer to the block in question */
532
452
{
533
453
        buf_page_t*     b;
534
 
        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
535
454
 
536
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
455
        ut_ad(buf_pool_mutex_own());
537
456
        ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE);
538
457
 
539
458
        /* Find the first successor of bpage in the LRU list
563
482
ibool
564
483
buf_LRU_free_from_unzip_LRU_list(
565
484
/*=============================*/
566
 
        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
567
 
        ulint           n_iterations)   /*!< in: how many times this has
568
 
                                        been called repeatedly without
569
 
                                        result: a high value means that
570
 
                                        we should search farther; we will
571
 
                                        search n_iterations / 5 of the
572
 
                                        unzip_LRU list, or nothing if
573
 
                                        n_iterations >= 5 */
 
485
        ulint   n_iterations)   /*!< in: how many times this has been called
 
486
                                repeatedly without result: a high value means
 
487
                                that we should search farther; we will search
 
488
                                n_iterations / 5 of the unzip_LRU list,
 
489
                                or nothing if n_iterations >= 5 */
574
490
{
575
491
        buf_block_t*    block;
576
492
        ulint           distance;
577
493
 
578
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
494
        ut_ad(buf_pool_mutex_own());
579
495
 
580
496
        /* Theoratically it should be much easier to find a victim
581
497
        from unzip_LRU as we can choose even a dirty block (as we'll
585
501
        if we have done five iterations so far. */
586
502
 
587
503
        if (UNIV_UNLIKELY(n_iterations >= 5)
588
 
            || !buf_LRU_evict_from_unzip_LRU(buf_pool)) {
 
504
            || !buf_LRU_evict_from_unzip_LRU()) {
589
505
 
590
506
                return(FALSE);
591
507
        }
637
553
ibool
638
554
buf_LRU_free_from_common_LRU_list(
639
555
/*==============================*/
640
 
        buf_pool_t*     buf_pool,
641
 
        ulint           n_iterations)
642
 
                                /*!< in: how many times this has been called
 
556
        ulint   n_iterations)   /*!< in: how many times this has been called
643
557
                                repeatedly without result: a high value means
644
558
                                that we should search farther; if
645
559
                                n_iterations < 10, then we search
649
563
        buf_page_t*     bpage;
650
564
        ulint           distance;
651
565
 
652
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
566
        ut_ad(buf_pool_mutex_own());
653
567
 
654
568
        distance = 100 + (n_iterations * buf_pool->curr_size) / 10;
655
569
 
706
620
ibool
707
621
buf_LRU_search_and_free_block(
708
622
/*==========================*/
709
 
        buf_pool_t*     buf_pool,
710
 
                                /*!< in: buffer pool instance */
711
 
        ulint           n_iterations)
712
 
                                /*!< in: how many times this has been called
 
623
        ulint   n_iterations)   /*!< in: how many times this has been called
713
624
                                repeatedly without result: a high value means
714
625
                                that we should search farther; if
715
626
                                n_iterations < 10, then we search
720
631
{
721
632
        ibool   freed = FALSE;
722
633
 
723
 
        buf_pool_mutex_enter(buf_pool);
 
634
        buf_pool_mutex_enter();
724
635
 
725
 
        freed = buf_LRU_free_from_unzip_LRU_list(buf_pool, n_iterations);
 
636
        freed = buf_LRU_free_from_unzip_LRU_list(n_iterations);
726
637
 
727
638
        if (!freed) {
728
 
                freed = buf_LRU_free_from_common_LRU_list(
729
 
                        buf_pool, n_iterations);
 
639
                freed = buf_LRU_free_from_common_LRU_list(n_iterations);
730
640
        }
731
641
 
732
642
        if (!freed) {
735
645
                buf_pool->LRU_flush_ended--;
736
646
        }
737
647
 
738
 
        buf_pool_mutex_exit(buf_pool);
 
648
        buf_pool_mutex_exit();
739
649
 
740
650
        return(freed);
741
651
}
750
660
wasted. */
751
661
UNIV_INTERN
752
662
void
753
 
buf_LRU_try_free_flushed_blocks(
754
 
/*============================*/
755
 
        buf_pool_t*     buf_pool)               /*!< in: buffer pool instance */
 
663
buf_LRU_try_free_flushed_blocks(void)
 
664
/*=================================*/
756
665
{
757
 
 
758
 
        if (buf_pool == NULL) {
759
 
                ulint   i;
760
 
 
761
 
                for (i = 0; i < srv_buf_pool_instances; i++) {
762
 
                        buf_pool = buf_pool_from_array(i);
763
 
                        buf_LRU_try_free_flushed_blocks(buf_pool);
764
 
                }
765
 
        } else {
766
 
                buf_pool_mutex_enter(buf_pool);
767
 
 
768
 
                while (buf_pool->LRU_flush_ended > 0) {
769
 
 
770
 
                        buf_pool_mutex_exit(buf_pool);
771
 
 
772
 
                        buf_LRU_search_and_free_block(buf_pool, 1);
773
 
 
774
 
                        buf_pool_mutex_enter(buf_pool);
775
 
                }
776
 
 
777
 
                buf_pool_mutex_exit(buf_pool);
 
666
        buf_pool_mutex_enter();
 
667
 
 
668
        while (buf_pool->LRU_flush_ended > 0) {
 
669
 
 
670
                buf_pool_mutex_exit();
 
671
 
 
672
                buf_LRU_search_and_free_block(1);
 
673
 
 
674
                buf_pool_mutex_enter();
778
675
        }
 
676
 
 
677
        buf_pool_mutex_exit();
779
678
}
780
679
 
781
680
/******************************************************************//**
782
 
Returns TRUE if less than 25 % of the buffer pool in any instance is
783
 
available. This can be used in heuristics to prevent huge transactions
784
 
eating up the whole buffer pool for their locks.
 
681
Returns TRUE if less than 25 % of the buffer pool is available. This can be
 
682
used in heuristics to prevent huge transactions eating up the whole buffer
 
683
pool for their locks.
785
684
@return TRUE if less than 25 % of buffer pool left */
786
685
UNIV_INTERN
787
686
ibool
788
687
buf_LRU_buf_pool_running_out(void)
789
688
/*==============================*/
790
689
{
791
 
        ulint   i;
792
 
        ibool   ret = FALSE;
793
 
 
794
 
        for (i = 0; i < srv_buf_pool_instances && !ret; i++) {
795
 
                buf_pool_t*     buf_pool;
796
 
 
797
 
                buf_pool = buf_pool_from_array(i);
798
 
 
799
 
                buf_pool_mutex_enter(buf_pool);
800
 
 
801
 
                if (!recv_recovery_on
802
 
                    && UT_LIST_GET_LEN(buf_pool->free)
803
 
                       + UT_LIST_GET_LEN(buf_pool->LRU)
804
 
                       < buf_pool->curr_size / 4) {
805
 
 
806
 
                        ret = TRUE;
807
 
                }
808
 
 
809
 
                buf_pool_mutex_exit(buf_pool);
 
690
        ibool   ret     = FALSE;
 
691
 
 
692
        buf_pool_mutex_enter();
 
693
 
 
694
        if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
 
695
            + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 4) {
 
696
 
 
697
                ret = TRUE;
810
698
        }
811
699
 
 
700
        buf_pool_mutex_exit();
 
701
 
812
702
        return(ret);
813
703
}
814
704
 
818
708
@return a free control block, or NULL if the buf_block->free list is empty */
819
709
UNIV_INTERN
820
710
buf_block_t*
821
 
buf_LRU_get_free_only(
822
 
/*==================*/
823
 
        buf_pool_t*     buf_pool)
 
711
buf_LRU_get_free_only(void)
 
712
/*=======================*/
824
713
{
825
714
        buf_block_t*    block;
826
715
 
827
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
716
        ut_ad(buf_pool_mutex_own());
828
717
 
829
718
        block = (buf_block_t*) UT_LIST_GET_FIRST(buf_pool->free);
830
719
 
831
720
        if (block) {
832
 
 
833
721
                ut_ad(block->page.in_free_list);
834
722
                ut_d(block->page.in_free_list = FALSE);
835
723
                ut_ad(!block->page.in_flush_list);
842
730
                buf_block_set_state(block, BUF_BLOCK_READY_FOR_USE);
843
731
                UNIV_MEM_ALLOC(block->frame, UNIV_PAGE_SIZE);
844
732
 
845
 
                ut_ad(buf_pool_from_block(block) == buf_pool);
846
 
 
847
733
                mutex_exit(&block->mutex);
848
734
        }
849
735
 
859
745
buf_block_t*
860
746
buf_LRU_get_free_block(
861
747
/*===================*/
862
 
        buf_pool_t*     buf_pool,       /*!< in: buffer pool instance */
863
 
        ulint           zip_size)       /*!< in: compressed page size in bytes,
864
 
                                        or 0 if uncompressed tablespace */
 
748
        ulint   zip_size)       /*!< in: compressed page size in bytes,
 
749
                                or 0 if uncompressed tablespace */
865
750
{
866
751
        buf_block_t*    block           = NULL;
867
752
        ibool           freed;
869
754
        ibool           mon_value_was   = FALSE;
870
755
        ibool           started_monitor = FALSE;
871
756
loop:
872
 
        buf_pool_mutex_enter(buf_pool);
 
757
        buf_pool_mutex_enter();
873
758
 
874
759
        if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
875
760
            + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->curr_size / 20) {
936
821
        }
937
822
 
938
823
        /* If there is a block in the free list, take it */
939
 
        block = buf_LRU_get_free_only(buf_pool);
 
824
        block = buf_LRU_get_free_only();
940
825
        if (block) {
941
826
 
942
 
                ut_ad(buf_pool_from_block(block) == buf_pool);
943
 
 
944
827
#ifdef UNIV_DEBUG
945
828
                block->page.zip.m_start =
946
829
#endif /* UNIV_DEBUG */
951
834
                if (UNIV_UNLIKELY(zip_size)) {
952
835
                        ibool   lru;
953
836
                        page_zip_set_size(&block->page.zip, zip_size);
954
 
 
955
 
                        block->page.zip.data = static_cast<unsigned char *>(buf_buddy_alloc(
956
 
                                buf_pool, zip_size, &lru));
957
 
 
 
837
                        block->page.zip.data = buf_buddy_alloc(zip_size, &lru);
958
838
                        UNIV_MEM_DESC(block->page.zip.data, zip_size, block);
959
839
                } else {
960
840
                        page_zip_set_size(&block->page.zip, 0);
961
841
                        block->page.zip.data = NULL;
962
842
                }
963
843
 
964
 
                buf_pool_mutex_exit(buf_pool);
 
844
                buf_pool_mutex_exit();
965
845
 
966
846
                if (started_monitor) {
967
847
                        srv_print_innodb_monitor = mon_value_was;
973
853
        /* If no block was in the free list, search from the end of the LRU
974
854
        list and try to free a block there */
975
855
 
976
 
        buf_pool_mutex_exit(buf_pool);
 
856
        buf_pool_mutex_exit();
977
857
 
978
 
        freed = buf_LRU_search_and_free_block(buf_pool, n_iterations);
 
858
        freed = buf_LRU_search_and_free_block(n_iterations);
979
859
 
980
860
        if (freed > 0) {
981
861
                goto loop;
1017
897
 
1018
898
        /* No free block was found: try to flush the LRU list */
1019
899
 
1020
 
        buf_flush_free_margin(buf_pool);
 
900
        buf_flush_free_margin();
1021
901
        ++srv_buf_pool_wait_free;
1022
902
 
1023
903
        os_aio_simulated_wake_handler_threads();
1024
904
 
1025
 
        buf_pool_mutex_enter(buf_pool);
 
905
        buf_pool_mutex_enter();
1026
906
 
1027
907
        if (buf_pool->LRU_flush_ended > 0) {
1028
908
                /* We have written pages in an LRU flush. To make the insert
1029
909
                buffer more efficient, we try to move these pages to the free
1030
910
                list. */
1031
911
 
1032
 
                buf_pool_mutex_exit(buf_pool);
 
912
                buf_pool_mutex_exit();
1033
913
 
1034
 
                buf_LRU_try_free_flushed_blocks(buf_pool);
 
914
                buf_LRU_try_free_flushed_blocks();
1035
915
        } else {
1036
 
                buf_pool_mutex_exit(buf_pool);
 
916
                buf_pool_mutex_exit();
1037
917
        }
1038
918
 
1039
919
        if (n_iterations > 10) {
1051
931
is inside the allowed limits. */
1052
932
UNIV_INLINE
1053
933
void
1054
 
buf_LRU_old_adjust_len(
1055
 
/*===================*/
1056
 
        buf_pool_t*     buf_pool)       /*!< in: buffer pool instance */
 
934
buf_LRU_old_adjust_len(void)
 
935
/*========================*/
1057
936
{
1058
937
        ulint   old_len;
1059
938
        ulint   new_len;
1060
939
 
1061
940
        ut_a(buf_pool->LRU_old);
1062
 
        ut_ad(buf_pool_mutex_own(buf_pool));
1063
 
        ut_ad(buf_pool->LRU_old_ratio >= BUF_LRU_OLD_RATIO_MIN);
1064
 
        ut_ad(buf_pool->LRU_old_ratio <= BUF_LRU_OLD_RATIO_MAX);
 
941
        ut_ad(buf_pool_mutex_own());
 
942
        ut_ad(buf_LRU_old_ratio >= BUF_LRU_OLD_RATIO_MIN);
 
943
        ut_ad(buf_LRU_old_ratio <= BUF_LRU_OLD_RATIO_MAX);
1065
944
#if BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5)
1066
945
# error "BUF_LRU_OLD_RATIO_MIN * BUF_LRU_OLD_MIN_LEN <= BUF_LRU_OLD_RATIO_DIV * (BUF_LRU_OLD_TOLERANCE + 5)"
1067
946
#endif
1077
956
 
1078
957
        old_len = buf_pool->LRU_old_len;
1079
958
        new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU)
1080
 
                         * buf_pool->LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV,
 
959
                         * buf_LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV,
1081
960
                         UT_LIST_GET_LEN(buf_pool->LRU)
1082
961
                         - (BUF_LRU_OLD_TOLERANCE
1083
962
                            + BUF_LRU_NON_OLD_MIN_LEN));
1119
998
called when the LRU list grows to BUF_LRU_OLD_MIN_LEN length. */
1120
999
static
1121
1000
void
1122
 
buf_LRU_old_init(
1123
 
/*=============*/
1124
 
        buf_pool_t*     buf_pool)
 
1001
buf_LRU_old_init(void)
 
1002
/*==================*/
1125
1003
{
1126
1004
        buf_page_t*     bpage;
1127
1005
 
1128
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
1006
        ut_ad(buf_pool_mutex_own());
1129
1007
        ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN);
1130
1008
 
1131
1009
        /* We first initialize all blocks in the LRU list as old and then use
1144
1022
        buf_pool->LRU_old = UT_LIST_GET_FIRST(buf_pool->LRU);
1145
1023
        buf_pool->LRU_old_len = UT_LIST_GET_LEN(buf_pool->LRU);
1146
1024
 
1147
 
        buf_LRU_old_adjust_len(buf_pool);
 
1025
        buf_LRU_old_adjust_len();
1148
1026
}
1149
1027
 
1150
1028
/******************************************************************//**
1155
1033
/*=================================*/
1156
1034
        buf_page_t*     bpage)  /*!< in/out: control block */
1157
1035
{
1158
 
        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
1159
 
 
1160
1036
        ut_ad(buf_pool);
1161
1037
        ut_ad(bpage);
1162
1038
        ut_ad(buf_page_in_file(bpage));
1163
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
1039
        ut_ad(buf_pool_mutex_own());
1164
1040
 
1165
1041
        if (buf_page_belongs_to_unzip_LRU(bpage)) {
1166
1042
                buf_block_t*    block = (buf_block_t*) bpage;
1180
1056
/*=================*/
1181
1057
        buf_page_t*     bpage)  /*!< in: control block */
1182
1058
{
1183
 
        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
1184
 
 
1185
1059
        ut_ad(buf_pool);
1186
1060
        ut_ad(bpage);
1187
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
1061
        ut_ad(buf_pool_mutex_own());
1188
1062
 
1189
1063
        ut_a(buf_page_in_file(bpage));
1190
1064
 
1198
1072
                /* Below: the previous block is guaranteed to exist,
1199
1073
                because the LRU_old pointer is only allowed to differ
1200
1074
                by BUF_LRU_OLD_TOLERANCE from strict
1201
 
                buf_pool->LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV of the LRU
 
1075
                buf_LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV of the LRU
1202
1076
                list length. */
1203
1077
                buf_page_t*     prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
1204
1078
 
1244
1118
        }
1245
1119
 
1246
1120
        /* Adjust the length of the old block list if necessary */
1247
 
        buf_LRU_old_adjust_len(buf_pool);
 
1121
        buf_LRU_old_adjust_len();
1248
1122
}
1249
1123
 
1250
1124
/******************************************************************//**
1257
1131
        ibool           old)    /*!< in: TRUE if should be put to the end
1258
1132
                                of the list, else put to the start */
1259
1133
{
1260
 
        buf_pool_t*     buf_pool = buf_pool_from_block(block);
1261
 
 
1262
1134
        ut_ad(buf_pool);
1263
1135
        ut_ad(block);
1264
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
1136
        ut_ad(buf_pool_mutex_own());
1265
1137
 
1266
1138
        ut_a(buf_page_belongs_to_unzip_LRU(&block->page));
1267
1139
 
1283
1155
/*=========================*/
1284
1156
        buf_page_t*     bpage)  /*!< in: control block */
1285
1157
{
1286
 
        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
1287
 
 
1288
1158
        ut_ad(buf_pool);
1289
1159
        ut_ad(bpage);
1290
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
1160
        ut_ad(buf_pool_mutex_own());
1291
1161
 
1292
1162
        ut_a(buf_page_in_file(bpage));
1293
1163
 
1303
1173
 
1304
1174
                buf_page_set_old(bpage, TRUE);
1305
1175
                buf_pool->LRU_old_len++;
1306
 
                buf_LRU_old_adjust_len(buf_pool);
 
1176
                buf_LRU_old_adjust_len();
1307
1177
 
1308
1178
        } else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) {
1309
1179
 
1310
1180
                /* The LRU list is now long enough for LRU_old to become
1311
1181
                defined: init it */
1312
1182
 
1313
 
                buf_LRU_old_init(buf_pool);
 
1183
                buf_LRU_old_init();
1314
1184
        } else {
1315
1185
                buf_page_set_old(bpage, buf_pool->LRU_old != NULL);
1316
1186
        }
1334
1204
                                LRU list is very short, the block is added to
1335
1205
                                the start, regardless of this parameter */
1336
1206
{
1337
 
        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
1338
 
 
1339
1207
        ut_ad(buf_pool);
1340
1208
        ut_ad(bpage);
1341
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
1209
        ut_ad(buf_pool_mutex_own());
1342
1210
 
1343
1211
        ut_a(buf_page_in_file(bpage));
1344
1212
        ut_ad(!bpage->in_LRU_list);
1372
1240
                /* Adjust the length of the old block list if necessary */
1373
1241
 
1374
1242
                buf_page_set_old(bpage, old);
1375
 
                buf_LRU_old_adjust_len(buf_pool);
 
1243
                buf_LRU_old_adjust_len();
1376
1244
 
1377
1245
        } else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) {
1378
1246
 
1379
1247
                /* The LRU list is now long enough for LRU_old to become
1380
1248
                defined: init it */
1381
1249
 
1382
 
                buf_LRU_old_init(buf_pool);
 
1250
                buf_LRU_old_init();
1383
1251
        } else {
1384
1252
                buf_page_set_old(bpage, buf_pool->LRU_old != NULL);
1385
1253
        }
1415
1283
/*=====================*/
1416
1284
        buf_page_t*     bpage)  /*!< in: control block */
1417
1285
{
1418
 
        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
1419
 
 
1420
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
1286
        ut_ad(buf_pool_mutex_own());
1421
1287
 
1422
1288
        if (bpage->old) {
1423
1289
                buf_pool->stat.n_pages_made_young++;
1443
1309
Try to free a block.  If bpage is a descriptor of a compressed-only
1444
1310
page, the descriptor object will be freed as well.
1445
1311
 
1446
 
NOTE: If this function returns BUF_LRU_FREED, it will temporarily
1447
 
release buf_pool->mutex.  Furthermore, the page frame will no longer be
 
1312
NOTE: If this function returns BUF_LRU_FREED, it will not temporarily
 
1313
release buf_pool_mutex.  Furthermore, the page frame will no longer be
1448
1314
accessible via bpage.
1449
1315
 
1450
 
The caller must hold buf_pool->mutex and buf_page_get_mutex(bpage) and
 
1316
The caller must hold buf_pool_mutex and buf_page_get_mutex(bpage) and
1451
1317
release these two mutexes after the call.  No other
1452
1318
buf_page_get_mutex() may be held when calling this function.
1453
1319
@return BUF_LRU_FREED if freed, BUF_LRU_CANNOT_RELOCATE or
1465
1331
                                was temporarily released, or NULL */
1466
1332
{
1467
1333
        buf_page_t*     b = NULL;
1468
 
        buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
1469
1334
        mutex_t*        block_mutex = buf_page_get_mutex(bpage);
1470
1335
 
1471
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
1336
        ut_ad(buf_pool_mutex_own());
1472
1337
        ut_ad(mutex_own(block_mutex));
1473
1338
        ut_ad(buf_page_in_file(bpage));
1474
1339
        ut_ad(bpage->in_LRU_list);
1475
1340
        ut_ad(!bpage->in_flush_list == !bpage->oldest_modification);
1476
 
#if UNIV_WORD_SIZE == 4
1477
 
        /* On 32-bit systems, there is no padding in buf_page_t.  On
1478
 
        other systems, Valgrind could complain about uninitialized pad
1479
 
        bytes. */
1480
1341
        UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage);
1481
 
#endif
1482
1342
 
1483
1343
        if (!buf_page_can_relocate(bpage)) {
1484
1344
 
1512
1372
                If it cannot be allocated (without freeing a block
1513
1373
                from the LRU list), refuse to free bpage. */
1514
1374
alloc:
1515
 
                buf_pool_mutex_exit_forbid(buf_pool);
1516
 
                b = static_cast<buf_page_t *>(buf_buddy_alloc(buf_pool, sizeof *b, NULL));
1517
 
                buf_pool_mutex_exit_allow(buf_pool);
 
1375
                buf_pool_mutex_exit_forbid();
 
1376
                b = buf_buddy_alloc(sizeof *b, NULL);
 
1377
                buf_pool_mutex_exit_allow();
1518
1378
 
1519
1379
                if (UNIV_UNLIKELY(!b)) {
1520
1380
                        return(BUF_LRU_CANNOT_RELOCATE);
1536
1396
                ut_a(bpage->buf_fix_count == 0);
1537
1397
 
1538
1398
                if (b) {
1539
 
                        buf_page_t*     hash_b;
1540
1399
                        buf_page_t*     prev_b  = UT_LIST_GET_PREV(LRU, b);
1541
 
 
1542
 
                        const ulint     fold = buf_page_address_fold(
 
1400
                        const ulint     fold    = buf_page_address_fold(
1543
1401
                                bpage->space, bpage->offset);
1544
1402
 
1545
 
                        hash_b  = buf_page_hash_get_low(
1546
 
                                buf_pool, bpage->space, bpage->offset, fold);
1547
 
 
1548
 
                        ut_a(!hash_b);
 
1403
                        ut_a(!buf_page_hash_get(bpage->space, bpage->offset));
1549
1404
 
1550
1405
                        b->state = b->oldest_modification
1551
1406
                                ? BUF_BLOCK_ZIP_DIRTY
1579
1434
 
1580
1435
                                ut_ad(prev_b->in_LRU_list);
1581
1436
                                ut_ad(buf_page_in_file(prev_b));
1582
 
#if UNIV_WORD_SIZE == 4
1583
 
                                /* On 32-bit systems, there is no
1584
 
                                padding in buf_page_t.  On other
1585
 
                                systems, Valgrind could complain about
1586
 
                                uninitialized pad bytes. */
1587
1437
                                UNIV_MEM_ASSERT_RW(prev_b, sizeof *prev_b);
1588
 
#endif
 
1438
 
1589
1439
                                UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU,
1590
1440
                                                     prev_b, b);
1591
1441
 
1605
1455
                                        ut_ad(buf_pool->LRU_old);
1606
1456
                                        /* Adjust the length of the
1607
1457
                                        old block list if necessary */
1608
 
                                        buf_LRU_old_adjust_len(buf_pool);
 
1458
                                        buf_LRU_old_adjust_len();
1609
1459
                                } else if (lru_len == BUF_LRU_OLD_MIN_LEN) {
1610
1460
                                        /* The LRU list is now long
1611
1461
                                        enough for LRU_old to become
1612
1462
                                        defined: init it */
1613
 
                                        buf_LRU_old_init(buf_pool);
 
1463
                                        buf_LRU_old_init();
1614
1464
                                }
1615
1465
#ifdef UNIV_LRU_DEBUG
1616
1466
                                /* Check that the "old" flag is consistent
1625
1475
                        if (b->state == BUF_BLOCK_ZIP_PAGE) {
1626
1476
                                buf_LRU_insert_zip_clean(b);
1627
1477
                        } else {
1628
 
                                /* Relocate on buf_pool->flush_list. */
1629
 
                                buf_flush_relocate_on_flush_list(bpage, b);
 
1478
                                buf_page_t* prev;
 
1479
 
 
1480
                                ut_ad(b->in_flush_list);
 
1481
                                ut_d(bpage->in_flush_list = FALSE);
 
1482
 
 
1483
                                prev = UT_LIST_GET_PREV(list, b);
 
1484
                                UT_LIST_REMOVE(list, buf_pool->flush_list, b);
 
1485
 
 
1486
                                if (prev) {
 
1487
                                        ut_ad(prev->in_flush_list);
 
1488
                                        UT_LIST_INSERT_AFTER(
 
1489
                                                list,
 
1490
                                                buf_pool->flush_list,
 
1491
                                                prev, b);
 
1492
                                } else {
 
1493
                                        UT_LIST_ADD_FIRST(
 
1494
                                                list,
 
1495
                                                buf_pool->flush_list,
 
1496
                                                b);
 
1497
                                }
1630
1498
                        }
1631
1499
 
1632
1500
                        bpage->zip.data = NULL;
1634
1502
 
1635
1503
                        /* Prevent buf_page_get_gen() from
1636
1504
                        decompressing the block while we release
1637
 
                        buf_pool->mutex and block_mutex. */
 
1505
                        buf_pool_mutex and block_mutex. */
1638
1506
                        b->buf_fix_count++;
1639
1507
                        b->io_fix = BUF_IO_READ;
1640
1508
                }
1643
1511
                        *buf_pool_mutex_released = TRUE;
1644
1512
                }
1645
1513
 
1646
 
                buf_pool_mutex_exit(buf_pool);
 
1514
                buf_pool_mutex_exit();
1647
1515
                mutex_exit(block_mutex);
1648
1516
 
1649
1517
                /* Remove possible adaptive hash index on the page.
1675
1543
                                : BUF_NO_CHECKSUM_MAGIC);
1676
1544
                }
1677
1545
 
1678
 
                buf_pool_mutex_enter(buf_pool);
 
1546
                buf_pool_mutex_enter();
1679
1547
                mutex_enter(block_mutex);
1680
1548
 
1681
1549
                if (b) {
1682
 
                        mutex_enter(&buf_pool->zip_mutex);
 
1550
                        mutex_enter(&buf_pool_zip_mutex);
1683
1551
                        b->buf_fix_count--;
1684
1552
                        buf_page_set_io_fix(b, BUF_IO_NONE);
1685
 
                        mutex_exit(&buf_pool->zip_mutex);
 
1553
                        mutex_exit(&buf_pool_zip_mutex);
1686
1554
                }
1687
1555
 
1688
1556
                buf_LRU_block_free_hashed_page((buf_block_t*) bpage);
1690
1558
                /* The block_mutex should have been released by
1691
1559
                buf_LRU_block_remove_hashed_page() when it returns
1692
1560
                BUF_BLOCK_ZIP_FREE. */
1693
 
                ut_ad(block_mutex == &buf_pool->zip_mutex);
 
1561
                ut_ad(block_mutex == &buf_pool_zip_mutex);
1694
1562
                mutex_enter(block_mutex);
1695
1563
        }
1696
1564
 
1705
1573
/*=============================*/
1706
1574
        buf_block_t*    block)  /*!< in: block, must not contain a file page */
1707
1575
{
1708
 
        void*           data;
1709
 
        buf_pool_t*     buf_pool = buf_pool_from_block(block);
 
1576
        void*   data;
1710
1577
 
1711
1578
        ut_ad(block);
1712
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
1579
        ut_ad(buf_pool_mutex_own());
1713
1580
        ut_ad(mutex_own(&block->mutex));
1714
1581
 
1715
1582
        switch (buf_block_get_state(block)) {
1743
1610
        if (data) {
1744
1611
                block->page.zip.data = NULL;
1745
1612
                mutex_exit(&block->mutex);
1746
 
                buf_pool_mutex_exit_forbid(buf_pool);
1747
 
 
1748
 
                buf_buddy_free(
1749
 
                        buf_pool, data, page_zip_get_size(&block->page.zip));
1750
 
 
1751
 
                buf_pool_mutex_exit_allow(buf_pool);
 
1613
                buf_pool_mutex_exit_forbid();
 
1614
                buf_buddy_free(data, page_zip_get_size(&block->page.zip));
 
1615
                buf_pool_mutex_exit_allow();
1752
1616
                mutex_enter(&block->mutex);
1753
1617
                page_zip_set_size(&block->page.zip, 0);
1754
1618
        }
1762
1626
/******************************************************************//**
1763
1627
Takes a block out of the LRU list and page hash table.
1764
1628
If the block is compressed-only (BUF_BLOCK_ZIP_PAGE),
1765
 
the object will be freed and buf_pool->zip_mutex will be released.
 
1629
the object will be freed and buf_pool_zip_mutex will be released.
1766
1630
 
1767
1631
If a compressed page or a compressed-only block descriptor is freed,
1768
1632
other compressed pages or compressed-only block descriptors may be
1779
1643
        ibool           zip)    /*!< in: TRUE if should remove also the
1780
1644
                                compressed page of an uncompressed page */
1781
1645
{
1782
 
        ulint                   fold;
1783
1646
        const buf_page_t*       hashed_bpage;
1784
 
        buf_pool_t*             buf_pool = buf_pool_from_bpage(bpage);
1785
 
 
1786
1647
        ut_ad(bpage);
1787
 
        ut_ad(buf_pool_mutex_own(buf_pool));
 
1648
        ut_ad(buf_pool_mutex_own());
1788
1649
        ut_ad(mutex_own(buf_page_get_mutex(bpage)));
1789
1650
 
1790
1651
        ut_a(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
1791
1652
        ut_a(bpage->buf_fix_count == 0);
1792
1653
 
1793
 
#if UNIV_WORD_SIZE == 4
1794
 
        /* On 32-bit systems, there is no padding in
1795
 
        buf_page_t.  On other systems, Valgrind could complain
1796
 
        about uninitialized pad bytes. */
1797
1654
        UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage);
1798
 
#endif
1799
1655
 
1800
1656
        buf_LRU_remove_block(bpage);
1801
1657
 
1870
1726
                break;
1871
1727
        }
1872
1728
 
1873
 
        fold = buf_page_address_fold(bpage->space, bpage->offset);
1874
 
        hashed_bpage = buf_page_hash_get_low(
1875
 
                buf_pool, bpage->space, bpage->offset, fold);
 
1729
        hashed_bpage = buf_page_hash_get(bpage->space, bpage->offset);
1876
1730
 
1877
1731
        if (UNIV_UNLIKELY(bpage != hashed_bpage)) {
1878
1732
                fprintf(stderr,
1892
1746
 
1893
1747
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
1894
1748
                mutex_exit(buf_page_get_mutex(bpage));
1895
 
                buf_pool_mutex_exit(buf_pool);
 
1749
                buf_pool_mutex_exit();
1896
1750
                buf_print();
1897
1751
                buf_LRU_print();
1898
1752
                buf_validate();
1904
1758
        ut_ad(!bpage->in_zip_hash);
1905
1759
        ut_ad(bpage->in_page_hash);
1906
1760
        ut_d(bpage->in_page_hash = FALSE);
1907
 
        HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, bpage);
 
1761
        HASH_DELETE(buf_page_t, hash, buf_pool->page_hash,
 
1762
                    buf_page_address_fold(bpage->space, bpage->offset),
 
1763
                    bpage);
1908
1764
        switch (buf_page_get_state(bpage)) {
1909
1765
        case BUF_BLOCK_ZIP_PAGE:
1910
1766
                ut_ad(!bpage->in_free_list);
1915
1771
 
1916
1772
                UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);
1917
1773
 
1918
 
                mutex_exit(&buf_pool->zip_mutex);
1919
 
                buf_pool_mutex_exit_forbid(buf_pool);
1920
 
 
1921
 
                buf_buddy_free(
1922
 
                        buf_pool, bpage->zip.data,
1923
 
                        page_zip_get_size(&bpage->zip));
1924
 
 
1925
 
                buf_buddy_free(buf_pool, bpage, sizeof(*bpage));
1926
 
                buf_pool_mutex_exit_allow(buf_pool);
1927
 
 
 
1774
                mutex_exit(&buf_pool_zip_mutex);
 
1775
                buf_pool_mutex_exit_forbid();
 
1776
                buf_buddy_free(bpage->zip.data,
 
1777
                               page_zip_get_size(&bpage->zip));
 
1778
                buf_buddy_free(bpage, sizeof(*bpage));
 
1779
                buf_pool_mutex_exit_allow();
1928
1780
                UNIV_MEM_UNDESC(bpage);
1929
1781
                return(BUF_BLOCK_ZIP_FREE);
1930
1782
 
1946
1798
                        ut_ad(!bpage->in_flush_list);
1947
1799
                        ut_ad(!bpage->in_LRU_list);
1948
1800
                        mutex_exit(&((buf_block_t*) bpage)->mutex);
1949
 
                        buf_pool_mutex_exit_forbid(buf_pool);
1950
 
 
1951
 
                        buf_buddy_free(
1952
 
                                buf_pool, data,
1953
 
                                page_zip_get_size(&bpage->zip));
1954
 
 
1955
 
                        buf_pool_mutex_exit_allow(buf_pool);
 
1801
                        buf_pool_mutex_exit_forbid();
 
1802
                        buf_buddy_free(data, page_zip_get_size(&bpage->zip));
 
1803
                        buf_pool_mutex_exit_allow();
1956
1804
                        mutex_enter(&((buf_block_t*) bpage)->mutex);
1957
1805
                        page_zip_set_size(&bpage->zip, 0);
1958
1806
                }
1981
1829
        buf_block_t*    block)  /*!< in: block, must contain a file page and
1982
1830
                                be in a state where it can be freed */
1983
1831
{
1984
 
#ifdef UNIV_DEBUG
1985
 
        buf_pool_t*     buf_pool = buf_pool_from_block(block);
1986
 
        ut_ad(buf_pool_mutex_own(buf_pool));
1987
 
#endif
 
1832
        ut_ad(buf_pool_mutex_own());
1988
1833
        ut_ad(mutex_own(&block->mutex));
1989
1834
 
1990
1835
        buf_block_set_state(block, BUF_BLOCK_MEMORY);
1993
1838
}
1994
1839
 
1995
1840
/**********************************************************************//**
1996
 
Updates buf_pool->LRU_old_ratio for one buffer pool instance.
 
1841
Updates buf_LRU_old_ratio.
1997
1842
@return updated old_pct */
1998
 
static
 
1843
UNIV_INTERN
1999
1844
uint
2000
 
buf_LRU_old_ratio_update_instance(
2001
 
/*==============================*/
2002
 
        buf_pool_t*     buf_pool,/*!< in: buffer pool instance */
2003
 
        uint            old_pct,/*!< in: Reserve this percentage of
2004
 
                                the buffer pool for "old" blocks. */
2005
 
        ibool           adjust) /*!< in: TRUE=adjust the LRU list;
2006
 
                                FALSE=just assign buf_pool->LRU_old_ratio
2007
 
                                during the initialization of InnoDB */
 
1845
buf_LRU_old_ratio_update(
 
1846
/*=====================*/
 
1847
        uint    old_pct,/*!< in: Reserve this percentage of
 
1848
                        the buffer pool for "old" blocks. */
 
1849
        ibool   adjust) /*!< in: TRUE=adjust the LRU list;
 
1850
                        FALSE=just assign buf_LRU_old_ratio
 
1851
                        during the initialization of InnoDB */
2008
1852
{
2009
1853
        uint    ratio;
2010
1854
 
2016
1860
        }
2017
1861
 
2018
1862
        if (adjust) {
2019
 
                buf_pool_mutex_enter(buf_pool);
 
1863
                buf_pool_mutex_enter();
2020
1864
 
2021
 
                if (ratio != buf_pool->LRU_old_ratio) {
2022
 
                        buf_pool->LRU_old_ratio = ratio;
 
1865
                if (ratio != buf_LRU_old_ratio) {
 
1866
                        buf_LRU_old_ratio = ratio;
2023
1867
 
2024
1868
                        if (UT_LIST_GET_LEN(buf_pool->LRU)
2025
 
                           >= BUF_LRU_OLD_MIN_LEN) {
2026
 
 
2027
 
                                buf_LRU_old_adjust_len(buf_pool);
 
1869
                            >= BUF_LRU_OLD_MIN_LEN) {
 
1870
                                buf_LRU_old_adjust_len();
2028
1871
                        }
2029
1872
                }
2030
1873
 
2031
 
                buf_pool_mutex_exit(buf_pool);
 
1874
                buf_pool_mutex_exit();
2032
1875
        } else {
2033
 
                buf_pool->LRU_old_ratio = ratio;
 
1876
                buf_LRU_old_ratio = ratio;
2034
1877
        }
 
1878
 
2035
1879
        /* the reverse of 
2036
1880
        ratio = old_pct * BUF_LRU_OLD_RATIO_DIV / 100 */
2037
1881
        return((uint) (ratio * 100 / (double) BUF_LRU_OLD_RATIO_DIV + 0.5));
2038
1882
}
2039
1883
 
2040
 
/**********************************************************************//**
2041
 
Updates buf_pool->LRU_old_ratio.
2042
 
@return updated old_pct */
2043
 
UNIV_INTERN
2044
 
ulint
2045
 
buf_LRU_old_ratio_update(
2046
 
/*=====================*/
2047
 
        uint    old_pct,/*!< in: Reserve this percentage of
2048
 
                        the buffer pool for "old" blocks. */
2049
 
        ibool   adjust) /*!< in: TRUE=adjust the LRU list;
2050
 
                        FALSE=just assign buf_pool->LRU_old_ratio
2051
 
                        during the initialization of InnoDB */
2052
 
{
2053
 
        ulint   i;
2054
 
        ulint   new_ratio = 0;
2055
 
 
2056
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
2057
 
                buf_pool_t*     buf_pool;
2058
 
 
2059
 
                buf_pool = buf_pool_from_array(i);
2060
 
 
2061
 
                new_ratio = buf_LRU_old_ratio_update_instance(
2062
 
                        buf_pool, old_pct, adjust);
2063
 
        }
2064
 
 
2065
 
        return(new_ratio);
2066
 
}
2067
 
 
2068
1884
/********************************************************************//**
2069
1885
Update the historical stats that we are collecting for LRU eviction
2070
1886
policy at the end of each interval. */
2073
1889
buf_LRU_stat_update(void)
2074
1890
/*=====================*/
2075
1891
{
2076
 
        ulint           i;
2077
1892
        buf_LRU_stat_t* item;
2078
 
        buf_pool_t*     buf_pool;
2079
 
        ibool           evict_started = FALSE;
2080
1893
 
2081
1894
        /* If we haven't started eviction yet then don't update stats. */
2082
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
2083
 
 
2084
 
                buf_pool = buf_pool_from_array(i);
2085
 
 
2086
 
                if (buf_pool->freed_page_clock != 0) {
2087
 
                        evict_started = TRUE;
2088
 
                        break;
2089
 
                }
2090
 
        }
2091
 
 
2092
 
        if (!evict_started) {
 
1895
        if (buf_pool->freed_page_clock == 0) {
2093
1896
                goto func_exit;
2094
1897
        }
2095
1898
 
 
1899
        buf_pool_mutex_enter();
 
1900
 
2096
1901
        /* Update the index. */
2097
1902
        item = &buf_LRU_stat_arr[buf_LRU_stat_arr_ind];
2098
1903
        buf_LRU_stat_arr_ind++;
2105
1910
        /* Put current entry in the array. */
2106
1911
        memcpy(item, &buf_LRU_stat_cur, sizeof *item);
2107
1912
 
 
1913
        buf_pool_mutex_exit();
 
1914
 
2108
1915
func_exit:
2109
1916
        /* Clear the current entry. */
2110
1917
        memset(&buf_LRU_stat_cur, 0, sizeof buf_LRU_stat_cur);
2112
1919
 
2113
1920
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
2114
1921
/**********************************************************************//**
2115
 
Validates the LRU list for one buffer pool instance. */
2116
 
static
2117
 
void
2118
 
buf_LRU_validate_instance(
2119
 
/*======================*/
2120
 
        buf_pool_t*     buf_pool)
 
1922
Validates the LRU list.
 
1923
@return TRUE */
 
1924
UNIV_INTERN
 
1925
ibool
 
1926
buf_LRU_validate(void)
 
1927
/*==================*/
2121
1928
{
2122
1929
        buf_page_t*     bpage;
2123
1930
        buf_block_t*    block;
2125
1932
        ulint           new_len;
2126
1933
 
2127
1934
        ut_ad(buf_pool);
2128
 
        buf_pool_mutex_enter(buf_pool);
 
1935
        buf_pool_mutex_enter();
2129
1936
 
2130
1937
        if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) {
2131
1938
 
2132
1939
                ut_a(buf_pool->LRU_old);
2133
1940
                old_len = buf_pool->LRU_old_len;
2134
1941
                new_len = ut_min(UT_LIST_GET_LEN(buf_pool->LRU)
2135
 
                                 * buf_pool->LRU_old_ratio
2136
 
                                 / BUF_LRU_OLD_RATIO_DIV,
 
1942
                                 * buf_LRU_old_ratio / BUF_LRU_OLD_RATIO_DIV,
2137
1943
                                 UT_LIST_GET_LEN(buf_pool->LRU)
2138
1944
                                 - (BUF_LRU_OLD_TOLERANCE
2139
1945
                                    + BUF_LRU_NON_OLD_MIN_LEN));
2209
2015
                ut_a(buf_page_belongs_to_unzip_LRU(&block->page));
2210
2016
        }
2211
2017
 
2212
 
        buf_pool_mutex_exit(buf_pool);
2213
 
}
2214
 
 
2215
 
/**********************************************************************//**
2216
 
Validates the LRU list.
2217
 
@return TRUE */
2218
 
UNIV_INTERN
2219
 
ibool
2220
 
buf_LRU_validate(void)
2221
 
/*==================*/
2222
 
{
2223
 
        ulint   i;
2224
 
 
2225
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
2226
 
                buf_pool_t*     buf_pool;
2227
 
 
2228
 
                buf_pool = buf_pool_from_array(i);
2229
 
                buf_LRU_validate_instance(buf_pool);
2230
 
        }
2231
 
 
 
2018
        buf_pool_mutex_exit();
2232
2019
        return(TRUE);
2233
2020
}
2234
2021
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
2235
2022
 
2236
2023
#if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
2237
2024
/**********************************************************************//**
2238
 
Prints the LRU list for one buffer pool instance. */
 
2025
Prints the LRU list. */
2239
2026
UNIV_INTERN
2240
2027
void
2241
 
buf_LRU_print_instance(
2242
 
/*===================*/
2243
 
        buf_pool_t*     buf_pool)
 
2028
buf_LRU_print(void)
 
2029
/*===============*/
2244
2030
{
2245
2031
        const buf_page_t*       bpage;
2246
2032
 
2247
2033
        ut_ad(buf_pool);
2248
 
        buf_pool_mutex_enter(buf_pool);
 
2034
        buf_pool_mutex_enter();
2249
2035
 
2250
2036
        bpage = UT_LIST_GET_FIRST(buf_pool->LRU);
2251
2037
 
2252
2038
        while (bpage != NULL) {
2253
2039
 
2254
 
                mutex_enter(buf_page_get_mutex(bpage));
2255
2040
                fprintf(stderr, "BLOCK space %lu page %lu ",
2256
2041
                        (ulong) buf_page_get_space(bpage),
2257
2042
                        (ulong) buf_page_get_page_no(bpage));
2279
2064
                case BUF_BLOCK_FILE_PAGE:
2280
2065
                        frame = buf_block_get_frame((buf_block_t*) bpage);
2281
2066
                        fprintf(stderr, "\ntype %lu"
2282
 
                                " index id %llu\n",
 
2067
                                " index id %lu\n",
2283
2068
                                (ulong) fil_page_get_type(frame),
2284
 
                                (ullint) btr_page_get_index_id(frame));
 
2069
                                (ulong) ut_dulint_get_low(
 
2070
                                        btr_page_get_index_id(frame)));
2285
2071
                        break;
2286
2072
                case BUF_BLOCK_ZIP_PAGE:
2287
2073
                        frame = bpage->zip.data;
2288
2074
                        fprintf(stderr, "\ntype %lu size %lu"
2289
 
                                " index id %llu\n",
 
2075
                                " index id %lu\n",
2290
2076
                                (ulong) fil_page_get_type(frame),
2291
2077
                                (ulong) buf_page_get_zip_size(bpage),
2292
 
                                (ullint) btr_page_get_index_id(frame));
 
2078
                                (ulong) ut_dulint_get_low(
 
2079
                                        btr_page_get_index_id(frame)));
2293
2080
                        break;
2294
2081
 
2295
2082
                default:
2298
2085
                        break;
2299
2086
                }
2300
2087
 
2301
 
                mutex_exit(buf_page_get_mutex(bpage));
2302
2088
                bpage = UT_LIST_GET_NEXT(LRU, bpage);
2303
2089
        }
2304
2090
 
2305
 
        buf_pool_mutex_exit(buf_pool);
2306
 
}
2307
 
 
2308
 
/**********************************************************************//**
2309
 
Prints the LRU list. */
2310
 
UNIV_INTERN
2311
 
void
2312
 
buf_LRU_print(void)
2313
 
/*===============*/
2314
 
{
2315
 
        ulint           i;
2316
 
        buf_pool_t*     buf_pool;
2317
 
 
2318
 
        for (i = 0; i < srv_buf_pool_instances; i++) {
2319
 
                buf_pool = buf_pool_from_array(i);
2320
 
                buf_LRU_print_instance(buf_pool);
2321
 
        }
 
2091
        buf_pool_mutex_exit();
2322
2092
}
2323
2093
#endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */