~drizzle-trunk/drizzle/development

Viewing changes to plugin/innobase/buf/buf0flu.c

  • Committer: Lee Bieber
  • Date: 2010-12-02 18:56:33 UTC
  • mfrom: (1966.3.1 bug683842)
  • mto: This revision was merged to the branch mainline in revision 1969.
  • Revision ID: kalebral@gmail.com-20101202185633-e27o1zhpev18dlsn
  • Message: Merge Monty - fix bug 683842: remove generic catch blocks

@@ -1,6 +1,6 @@
 /*****************************************************************************
 
-Copyright (C) 1995, 2010, Innobase Oy. All Rights Reserved.
+Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved.
 
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -114,9 +114,7 @@
         p_node = rbt_prev(buf_pool->flush_rbt, c_node);
 
         if (p_node != NULL) {
-                buf_page_t**    value;
-                value = rbt_value(buf_page_t*, p_node);
-                prev = *value;
+                prev = *rbt_value(buf_page_t*, p_node);
                 ut_a(prev != NULL);
         }
 
@@ -131,17 +129,12 @@
 /*============================*/
         buf_page_t*     bpage)  /*!< in: bpage to be removed. */
 {
-#ifdef UNIV_DEBUG
         ibool           ret = FALSE;
-#endif /* UNIV_DEBUG */
         buf_pool_t*     buf_pool = buf_pool_from_bpage(bpage);
 
         ut_ad(buf_flush_list_mutex_own(buf_pool));
 
-#ifdef UNIV_DEBUG
-        ret =
-#endif /* UNIV_DEBUG */
-        rbt_delete(buf_pool->flush_rbt, &bpage);
+        ret = rbt_delete(buf_pool->flush_rbt, &bpage);
         ut_ad(ret);
 }
 
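The #ifdef pattern removed in this hunk exists because ut_ad() compiles to nothing in non-debug builds, so upstream captures rbt_delete()'s return value only when UNIV_DEBUG is defined, avoiding a set-but-unused variable. A minimal standalone sketch of that pattern, using illustrative names (my_ad, delete_entry) rather than InnoDB's:

    /* Sketch only: in a release build the assertion macro expands to
       nothing, so the return value is captured only under the debug
       define, exactly as the removed #ifdef UNIV_DEBUG block did. */
    #include <assert.h>

    #ifdef MY_DEBUG
    # define my_ad(x)       assert(x)
    #else
    # define my_ad(x)       ((void) 0)
    #endif

    static int delete_entry(void)           /* stand-in for rbt_delete() */
    {
            return(1);
    }

    int main(void)
    {
    #ifdef MY_DEBUG
            int     ret =
    #endif
            delete_entry();

            my_ad(ret);     /* references ret only when MY_DEBUG is defined */
            return(0);
    }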
 
@@ -329,7 +322,7 @@
 
         buf_flush_list_mutex_enter(buf_pool);
 
-        /* The field in_LRU_list is protected by buf_pool->mutex, which
+        /* The field in_LRU_list is protected by buf_pool_mutex, which
         we are not holding.  However, while a block is in the flush
         list, it is dirty and cannot be discarded, not from the
         page_hash or from the LRU list.  At most, the uncompressed
@@ -992,7 +985,7 @@
         ut_ad(page);
 
         if (page_zip_) {
-                page_zip_des_t* page_zip = static_cast<page_zip_des_t *>(page_zip_);
+                page_zip_des_t* page_zip = page_zip_;
                 ulint           zip_size = page_zip_get_size(page_zip);
                 ut_ad(zip_size);
                 ut_ad(ut_is_2pow(zip_size));
@@ -1010,8 +1003,8 @@
                 case FIL_PAGE_TYPE_ZBLOB:
                 case FIL_PAGE_TYPE_ZBLOB2:
                 case FIL_PAGE_INDEX:
-                        mach_write_to_8(page_zip->data
-                                        + FIL_PAGE_LSN, newest_lsn);
+                        mach_write_ull(page_zip->data
+                                       + FIL_PAGE_LSN, newest_lsn);
                         memset(page_zip->data + FIL_PAGE_FILE_FLUSH_LSN, 0, 8);
                         mach_write_to_4(page_zip->data
                                         + FIL_PAGE_SPACE_OR_CHKSUM,
@@ -1033,10 +1026,10 @@
         }
 
         /* Write the newest modification lsn to the page header and trailer */
-        mach_write_to_8(page + FIL_PAGE_LSN, newest_lsn);
+        mach_write_ull(page + FIL_PAGE_LSN, newest_lsn);
 
-        mach_write_to_8(page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM,
-                        newest_lsn);
+        mach_write_ull(page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM,
+                       newest_lsn);
 
         /* Store the new formula checksum */
 
@@ -1081,7 +1074,7 @@
 
         ut_ad(buf_page_in_file(bpage));
 
-        /* We are not holding buf_pool->mutex or block_mutex here.
+        /* We are not holding buf_pool_mutex or block_mutex here.
         Nevertheless, it is safe to access bpage, because it is
         io_fixed and oldest_modification != 0.  Thus, it cannot be
         relocated in the buffer pool or removed from flush_list or
@@ -1124,8 +1117,8 @@
                         ut_a(mach_read_from_4(frame + FIL_PAGE_SPACE_OR_CHKSUM)
                              == page_zip_calc_checksum(frame, zip_size));
                 }
-                mach_write_to_8(frame + FIL_PAGE_LSN,
-                                bpage->newest_modification);
+                mach_write_ull(frame + FIL_PAGE_LSN,
+                               bpage->newest_modification);
                 memset(frame + FIL_PAGE_FILE_FLUSH_LSN, 0, 8);
                 break;
         case BUF_BLOCK_FILE_PAGE:
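For context on the mach_write_to_8()/mach_write_ull() substitutions in the last few hunks: both helpers store a 64-bit LSN into the page at FIL_PAGE_LSN (and, for uncompressed pages, again in the trailer), and InnoDB's mach_* routines lay integers out most-significant byte first. A hedged, self-contained equivalent of that byte layout, not InnoDB's actual implementation:

    /* Sketch: store a 64-bit value big-endian, the byte order assumed
       above for InnoDB's on-page integer fields. */
    #include <assert.h>

    static void write_be64(unsigned char* b, unsigned long long n)
    {
            int     i;

            for (i = 0; i < 8; i++) {
                    b[i] = (unsigned char) (n >> (8 * (7 - i)));
            }
    }

    int main(void)
    {
            unsigned char   buf[8];

            write_be64(buf, 0x0102030405060708ULL);
            assert(buf[0] == 0x01 && buf[7] == 0x08);
            return(0);
    }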
@@ -1152,83 +1145,6 @@
         }
 }
 
-# if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
-/********************************************************************//**
-Writes a flushable page asynchronously from the buffer pool to a file.
-NOTE: buf_pool->mutex and block->mutex must be held upon entering this
-function, and they will be released by this function after flushing.
-This is loosely based on buf_flush_batch() and buf_flush_page().
-@return TRUE if the page was flushed and the mutexes released */
-UNIV_INTERN
-ibool
-buf_flush_page_try(
-/*===============*/
-        buf_pool_t*     buf_pool,       /*!< in/out: buffer pool instance */
-        buf_block_t*    block)          /*!< in/out: buffer control block */
-{
-        ut_ad(buf_pool_mutex_own(buf_pool));
-        ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
-        ut_ad(mutex_own(&block->mutex));
-
-        if (!buf_flush_ready_for_flush(&block->page, BUF_FLUSH_LRU)) {
-                return(FALSE);
-        }
-
-        if (buf_pool->n_flush[BUF_FLUSH_LRU] > 0
-            || buf_pool->init_flush[BUF_FLUSH_LRU]) {
-                /* There is already a flush batch of the same type running */
-                return(FALSE);
-        }
-
-        buf_pool->init_flush[BUF_FLUSH_LRU] = TRUE;
-
-        buf_page_set_io_fix(&block->page, BUF_IO_WRITE);
-
-        buf_page_set_flush_type(&block->page, BUF_FLUSH_LRU);
-
-        if (buf_pool->n_flush[BUF_FLUSH_LRU]++ == 0) {
-
-                os_event_reset(buf_pool->no_flush[BUF_FLUSH_LRU]);
-        }
-
-        /* VERY IMPORTANT:
-        Because any thread may call the LRU flush, even when owning
-        locks on pages, to avoid deadlocks, we must make sure that the
-        s-lock is acquired on the page without waiting: this is
-        accomplished because buf_flush_ready_for_flush() must hold,
-        and that requires the page not to be bufferfixed. */
-
-        rw_lock_s_lock_gen(&block->lock, BUF_IO_WRITE);
-
-        /* Note that the s-latch is acquired before releasing the
-        buf_pool mutex: this ensures that the latch is acquired
-        immediately. */
-
-        mutex_exit(&block->mutex);
-        buf_pool_mutex_exit(buf_pool);
-
-        /* Even though block is not protected by any mutex at this
-        point, it is safe to access block, because it is io_fixed and
-        oldest_modification != 0.  Thus, it cannot be relocated in the
-        buffer pool or removed from flush_list or LRU_list. */
-
-        buf_flush_write_block_low(&block->page);
-
-        buf_pool_mutex_enter(buf_pool);
-        buf_pool->init_flush[BUF_FLUSH_LRU] = FALSE;
-
-        if (buf_pool->n_flush[BUF_FLUSH_LRU] == 0) {
-                /* The running flush batch has ended */
-                os_event_set(buf_pool->no_flush[BUF_FLUSH_LRU]);
-        }
-
-        buf_pool_mutex_exit(buf_pool);
-        buf_flush_buffered_writes();
-
-        return(TRUE);
-}
-# endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
-
 /********************************************************************//**
 Writes a flushable page asynchronously from the buffer pool to a file.
 NOTE: in simulated aio we must call
@@ -1353,12 +1269,8 @@
 /*====================*/
         ulint           space,          /*!< in: space id */
         ulint           offset,         /*!< in: page offset */
-        enum buf_flush  flush_type,     /*!< in: BUF_FLUSH_LRU or
+        enum buf_flush  flush_type)     /*!< in: BUF_FLUSH_LRU or
                                         BUF_FLUSH_LIST */
-        ulint           n_flushed,      /*!< in: number of pages
-                                        flushed so far in this batch */
-        ulint           n_to_flush)     /*!< in: maximum number of pages
-                                        we are allowed to flush */
 {
         ulint           i;
         ulint           low;
@@ -1399,21 +1311,6 @@
 
                 buf_page_t*     bpage;
 
-                if ((count + n_flushed) >= n_to_flush) {
-
-                        /* We have already flushed enough pages and
-                        should call it a day. There is, however, one
-                        exception. If the page whose neighbors we
-                        are flushing has not been flushed yet then
-                        we'll try to flush the victim that we
-                        selected originally. */
-                        if (i <= offset) {
-                                i = offset;
-                        } else {
-                                break;
-                        }
-                }
-
                 buf_pool = buf_pool_get(space, i);
 
                 buf_pool_mutex_enter(buf_pool);
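The block removed above is upstream's cap on neighbor flushing: once count + n_flushed reaches n_to_flush, only the originally selected victim page at `offset` is still flushed, and the scan stops once the iterator has passed it. The Drizzle side drops that cap along with the extra parameters. The effect of the removed check can be summarized by a small predicate; the names below are illustrative, not part of the source:

    /* Sketch: the effective decision the removed quota check makes for a
       candidate neighbor offset i around the victim page at `offset`. */
    static int keep_flushing(
            unsigned long   i,              /* candidate page offset */
            unsigned long   offset,         /* originally selected victim */
            unsigned long   count,          /* flushed by this neighbor scan */
            unsigned long   n_flushed,      /* flushed earlier in the batch */
            unsigned long   n_to_flush)     /* overall batch quota */
    {
            if (count + n_flushed < n_to_flush) {
                    return(1);              /* quota not yet reached */
            }

            return(i == offset);            /* quota reached: victim only */
    }

    int main(void)
    {
            /* With the quota already met, only the victim (offset 7) passes. */
            return(!(keep_flushing(7, 7, 5, 5, 10) && !keep_flushing(8, 7, 5, 5, 10)));
    }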
@@ -1481,8 +1378,6 @@
                                         buf_page_in_file(bpage) */
         enum buf_flush  flush_type,     /*!< in: BUF_FLUSH_LRU
                                         or BUF_FLUSH_LIST */
-        ulint           n_to_flush,     /*!< in: number of pages to
-                                        flush */
         ulint*          count)          /*!< in/out: number of pages
                                         flushed */
 {
@@ -1516,11 +1411,7 @@
                 mutex_exit(block_mutex);
 
                 /* Try to flush also all the neighbors */
-                *count += buf_flush_try_neighbors(space,
-                                                  offset,
-                                                  flush_type,
-                                                  *count,
-                                                  n_to_flush);
+                *count += buf_flush_try_neighbors(space, offset, flush_type);
 
                 buf_pool_mutex_enter(buf_pool);
                 flushed = TRUE;
@@ -1560,7 +1451,7 @@
                 a page that isn't ready for flushing. */
                 while (bpage != NULL
                        && !buf_flush_page_and_try_neighbors(
-                                bpage, BUF_FLUSH_LRU, max, &count)) {
+                                bpage, BUF_FLUSH_LRU, &count)) {
 
                         bpage = UT_LIST_GET_PREV(LRU, bpage);
                 }
@@ -1641,7 +1532,7 @@
                 while (bpage != NULL
                        && len > 0
                        && !buf_flush_page_and_try_neighbors(
-                                bpage, BUF_FLUSH_LIST, min_n, &count)) {
+                                bpage, BUF_FLUSH_LIST, &count)) {
 
                         buf_flush_list_mutex_enter(buf_pool);
 
@@ -1845,9 +1736,11 @@
                 ulint   i;
 
                 for (i = 0; i < srv_buf_pool_instances; ++i) {
-                        buf_pool_t*     i_buf_pool = buf_pool_from_array(i);
-
-                        os_event_wait(i_buf_pool->no_flush[type]);
+                        buf_pool_t*     buf_pool;
+
+                        buf_pool = buf_pool_from_array(i);
+
+                        os_event_wait(buf_pool->no_flush[type]);
                 }
         } else {
                 os_event_wait(buf_pool->no_flush[type]);
@@ -2207,23 +2100,23 @@
 
                 ut_ad(bpage->in_flush_list);
 
-                /* A page in buf_pool->flush_list can be in
-                BUF_BLOCK_REMOVE_HASH state. This happens when a page
-                is in the middle of being relocated. In that case the
-                original descriptor can have this state and still be
-                in the flush list waiting to acquire the
-                buf_pool->flush_list_mutex to complete the relocation. */
+                /* A page in flush_list can be in BUF_BLOCK_REMOVE_HASH
+                state. This happens when a page is in the middle of
+                being relocated. In that case the original descriptor
+                can have this state and still be in the flush list
+                waiting to acquire the flush_list_mutex to complete
+                the relocation. */
                 ut_a(buf_page_in_file(bpage)
                      || buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH);
                 ut_a(om > 0);
 
                 if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
-                        buf_page_t** prpage;
+                        buf_page_t* rpage;
 
                         ut_a(rnode);
-                        prpage = rbt_value(buf_page_t*, rnode);
+                        rpage = *rbt_value(buf_page_t*, rnode);
 
-                        ut_a(*prpage);
-                        ut_a(*prpage == bpage);
+                        ut_a(rpage);
+                        ut_a(rpage == bpage);
                         rnode = rbt_next(buf_pool->flush_rbt, rnode);
                 }
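The rbt_value() rewrites in this file (the rbt_prev() hunk near the top and the rpage/prpage change just above) both hinge on the same point: rbt_value() yields a pointer to the payload stored inside a tree node, so one dereference recovers the stored buf_page_t*. A minimal standalone sketch of that access pattern, with illustrative names rather than InnoDB's real ut0rbt definitions:

    /* Sketch only: a node that keeps one pointer as its payload, and an
       accessor that, like rbt_value(), returns a pointer TO the stored
       payload rather than the payload itself. */
    #include <assert.h>
    #include <stddef.h>

    struct page;                            /* stand-in for buf_page_t */

    typedef struct node_struct {
            struct node_struct*     left;
            struct node_struct*     right;
            struct page*            value;  /* payload kept inside the node */
    } node_t;

    static struct page** node_value(node_t* node)   /* cf. rbt_value() */
    {
            return(&node->value);
    }

    int main(void)
    {
            int             dummy;
            node_t          node;
            struct page**   value;
            struct page*    page_a;
            struct page*    page_b;

            node.left = NULL;
            node.right = NULL;
            node.value = (struct page*) &dummy;

            /* Two-step form, as in the code being removed... */
            value = node_value(&node);
            page_a = *value;

            /* ...and the collapsed form used by the replacement lines. */
            page_b = *node_value(&node);

            assert(page_a == page_b);
            return(0);
    }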