    situation, which theoretically should not happen;
  - to set a timeout of <T> seconds, add:
      #define KEYCACHE_TIMEOUT <T>
  - to enable the module traps and to send debug information from the
    key cache module to a special debug log, add:
      #define KEYCACHE_DEBUG
    the name of this debug log file <LOG NAME> can be set through:
      #define KEYCACHE_DEBUG_LOG  <LOG NAME>
    if the name is not defined, it is set by default;
    if the KEYCACHE_DEBUG flag is not set and we are in debug
    mode, i.e. when ! defined(DBUG_OFF), the debug information from the
    module is sent to the regular debug log.

  Example of the settings:
    #define SERIALIZED_READ_FROM_CACHE
    #define MAX_THREADS 100
    #define KEYCACHE_TIMEOUT 1
    #define KEYCACHE_DEBUG
    #define KEYCACHE_DEBUG_LOG  "my_key_cache_debug.log"
/* Map a pointer to a struct member back to a pointer to the enclosing struct. */
#define STRUCT_PTR(TYPE, MEMBER, a)                                           \
          (TYPE*) ((char*) (a) - offsetof(TYPE, MEMBER))
    *next_changed, **prev_changed;    /* for lists of file dirty/clean blocks */
  struct st_hash_link *hash_link;     /* backward ptr to referring hash_link */
  KEYCACHE_WQUEUE wqueue[2];          /* queues on waiting requests for new/old pages */
  uint requests;                      /* number of requests for the block */
  uchar *buffer;                      /* buffer for the block page */
  uint offset;                        /* beginning of modified data in the buffer */
  uint length;                        /* end of data in the buffer */
  uint status;                        /* state of the block */
  enum BLOCK_TEMPERATURE temperature; /* block temperature: cold, warm, hot */
  uint hits_left;                     /* number of hits left until promotion */
  ulonglong last_hit_time;            /* timestamp of the last hit */
  KEYCACHE_CONDVAR *condvar;          /* condition variable for 'no readers' event */
#define FLUSH_CACHE         2000        /* sort this many blocks at once */

static int flush_all_key_blocks(KEY_CACHE *keycache);
#ifdef THREAD
static void wait_on_queue(KEYCACHE_WQUEUE *wqueue,
                          pthread_mutex_t *mutex);
static void release_whole_queue(KEYCACHE_WQUEUE *wqueue);
#else
#define wait_on_queue(wqueue, mutex)    do {} while (0)
#define release_whole_queue(wqueue)     do {} while (0)
#endif
static void free_block(KEY_CACHE *keycache, BLOCK_LINK *block);
#if !defined(DBUG_OFF)
static void test_key_cache(KEY_CACHE *keycache,
                           const char *where, my_bool lock);
#define KEYCACHE_HASH(f, pos)                                                 \
  (((ulong) ((pos) / keycache->key_cache_block_size) +                       \
    (ulong) (f)) & (keycache->hash_entries-1))
#define FILE_HASH(f)                 ((uint) (f) & (CHANGED_BLOCKS_HASH-1))
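/*
  Worked example (illustrative only, not from the original source): both
  hash tables are sized to a power of two, so '& (size-1)' acts as a cheap
  modulo. With key_cache_block_size = 1024 and hash_entries = 512, a page
  at position 1048576 of file descriptor 7 maps to bucket
  ((1048576 / 1024) + 7) & 511 = 1031 & 511 = 7.
*/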
#ifdef KEYCACHE_TIMEOUT
#define DEFAULT_KEYCACHE_DEBUG_LOG  "keycache_debug.log"

#if defined(KEYCACHE_DEBUG) && ! defined(KEYCACHE_DEBUG_LOG)
#define KEYCACHE_DEBUG_LOG  DEFAULT_KEYCACHE_DEBUG_LOG
#endif

#if defined(KEYCACHE_DEBUG_LOG)
static FILE *keycache_debug_log=NULL;
static void keycache_debug_print _VARARGS((const char *fmt,...));
#define KEYCACHE_DEBUG_OPEN                                                   \
          if (!keycache_debug_log)                                            \
          {                                                                   \
            keycache_debug_log= fopen(KEYCACHE_DEBUG_LOG, "w");               \
            (void) setvbuf(keycache_debug_log, NULL, _IOLBF, BUFSIZ);         \
          }

#define KEYCACHE_DEBUG_CLOSE                                                  \
          if (keycache_debug_log)                                             \
          {                                                                   \
            fclose(keycache_debug_log);                                       \
            keycache_debug_log= 0;                                            \
          }
#else
#define KEYCACHE_DEBUG_OPEN
#define KEYCACHE_DEBUG_CLOSE
#endif /* defined(KEYCACHE_DEBUG_LOG) */

#if defined(KEYCACHE_DEBUG_LOG) && defined(KEYCACHE_DEBUG)
#define KEYCACHE_DBUG_PRINT(l, m)                                             \
            { if (keycache_debug_log) fprintf(keycache_debug_log, "%s: ", l); \
              keycache_debug_print m; }
#define KEYCACHE_DBUG_ASSERT(a)                                               \
            { if (! (a) && keycache_debug_log) fclose(keycache_debug_log);    \
              assert(a); }
#else
#define KEYCACHE_DBUG_PRINT(l, m)  DBUG_PRINT(l, m)
#define KEYCACHE_DBUG_ASSERT(a)    DBUG_ASSERT(a)
#endif /* defined(KEYCACHE_DEBUG_LOG) && defined(KEYCACHE_DEBUG) */
#if defined(KEYCACHE_DEBUG) || !defined(DBUG_OFF)
#ifdef THREAD
static long keycache_thread_id;
#define KEYCACHE_THREAD_TRACE(l)                                              \
             KEYCACHE_DBUG_PRINT(l,("|thread %ld",keycache_thread_id))

#define KEYCACHE_THREAD_TRACE_BEGIN(l)                                        \
            { struct st_my_thread_var *thread_var= my_thread_var;             \
              keycache_thread_id= thread_var->id;                             \
              KEYCACHE_DBUG_PRINT(l,("[thread %ld",keycache_thread_id)) }

#define KEYCACHE_THREAD_TRACE_END(l)                                          \
            KEYCACHE_DBUG_PRINT(l,("]thread %ld",keycache_thread_id))
#else /* THREAD */
#define KEYCACHE_THREAD_TRACE(l)        KEYCACHE_DBUG_PRINT(l,(""))
#define KEYCACHE_THREAD_TRACE_BEGIN(l)  KEYCACHE_DBUG_PRINT(l,(""))
#define KEYCACHE_THREAD_TRACE_END(l)    KEYCACHE_DBUG_PRINT(l,(""))
#endif /* THREAD */
#else
#define KEYCACHE_THREAD_TRACE_BEGIN(l)
#define KEYCACHE_THREAD_TRACE_END(l)
#define KEYCACHE_THREAD_TRACE(l)
#endif /* defined(KEYCACHE_DEBUG) || !defined(DBUG_OFF) */
#define BLOCK_NUMBER(b)                                                       \
  ((uint) (((char*)(b)-(char *) keycache->block_root)/sizeof(BLOCK_LINK)))
#define HASH_LINK_NUMBER(h)                                                   \
  ((uint) (((char*)(h)-(char *) keycache->hash_link_root)/sizeof(HASH_LINK)))
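/*
  Worked example (illustrative, not from the original source): block_root
  and hash_link_root are plain arrays, so dividing the byte difference by
  the element size recovers the array index; for a pointer b into
  block_root, BLOCK_NUMBER(b) == (uint) (b - keycache->block_root).
  These numbers are used only in debug and trace output.
*/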
#if (defined(KEYCACHE_TIMEOUT)) || defined(KEYCACHE_DEBUG)
static int keycache_pthread_cond_wait(pthread_cond_t *cond,
                                      pthread_mutex_t *mutex);
#else
#define keycache_pthread_cond_wait pthread_cond_wait
#endif

#if defined(KEYCACHE_DEBUG)
static int keycache_pthread_mutex_lock(pthread_mutex_t *mutex);
static void keycache_pthread_mutex_unlock(pthread_mutex_t *mutex);
static int keycache_pthread_cond_signal(pthread_cond_t *cond);
#else
#define keycache_pthread_mutex_lock pthread_mutex_lock
#define keycache_pthread_mutex_unlock pthread_mutex_unlock
#define keycache_pthread_cond_signal pthread_cond_signal
#endif /* defined(KEYCACHE_DEBUG) */
#if !defined(DBUG_OFF)
#define inline  /* disabled inline for easier debugging */
static int fail_block(BLOCK_LINK *block);
static int fail_hlink(HASH_LINK *hlink);
static int cache_empty(KEY_CACHE *keycache);
#endif

static inline uint next_power(uint value)
{
  return (uint) my_round_up_to_next_power((uint32) value) << 1;
}
int init_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
                   size_t use_mem, uint division_limit,
                   uint age_threshold)
  ulong blocks, hash_links;
  DBUG_ENTER("init_key_cache");
  DBUG_ASSERT(key_cache_block_size >= 512);

  if (keycache->key_cache_inited && keycache->disk_blocks > 0)
    DBUG_PRINT("warning",("key cache already in use"));

  keycache->global_cache_w_requests= keycache->global_cache_r_requests= 0;
  keycache->hash_link_root= (HASH_LINK*) ((char*) keycache->hash_root +
                                          ALIGN_SIZE((sizeof(HASH_LINK*) *
                                                      keycache->hash_entries)));
  bzero((uchar*) keycache->block_root,
        keycache->disk_blocks * sizeof(BLOCK_LINK));
  bzero((uchar*) keycache->hash_root,
        keycache->hash_entries * sizeof(HASH_LINK*));
  bzero((uchar*) keycache->hash_link_root,
        keycache->hash_links * sizeof(HASH_LINK));
  keycache->hash_links_used= 0;
  keycache->free_hash_list= NULL;
  keycache->blocks_used= keycache->blocks_changed= 0;

  keycache->waiting_for_hash_link.last_thread= NULL;
  keycache->waiting_for_block.last_thread= NULL;
  DBUG_PRINT("exit",
             ("disk_blocks: %d block_root: 0x%lx hash_entries: %d\
 hash_root: 0x%lx hash_links: %d hash_link_root: 0x%lx",
              keycache->disk_blocks, (long) keycache->block_root,
              keycache->hash_entries, (long) keycache->hash_root,
              keycache->hash_links, (long) keycache->hash_link_root));
  bzero((uchar*) keycache->changed_blocks,
        sizeof(keycache->changed_blocks[0]) * CHANGED_BLOCKS_HASH);
  bzero((uchar*) keycache->file_blocks,
        sizeof(keycache->file_blocks[0]) * CHANGED_BLOCKS_HASH);
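/*
  Illustrative call (a sketch, not part of the original file; the sizes
  are arbitrary examples): create a cache of 8MB built from 1KB blocks,
  with example midpoint-insertion parameters. init_key_cache() returns
  the number of allocated blocks, or 0 on failure.

    KEY_CACHE cache;
    memset(&cache, 0, sizeof(cache));
    if (init_key_cache(&cache, 1024, 8 * 1024 * 1024, 100, 300) == 0)
      the cache could not be created;
*/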
  (when cnt_for_resize=0).
*/

int resize_key_cache(KEY_CACHE *keycache, uint key_cache_block_size,
                     size_t use_mem, uint division_limit,
                     uint age_threshold)
  DBUG_ENTER("resize_key_cache");

  if (!keycache->key_cache_inited)
    DBUG_RETURN(keycache->disk_blocks);

  if (key_cache_block_size == keycache->key_cache_block_size &&
      use_mem == keycache->key_cache_mem_size)
  {
    change_key_cache_param(keycache, division_limit, age_threshold);
    DBUG_RETURN(keycache->disk_blocks);
  }

  keycache_pthread_mutex_lock(&keycache->cache_lock);

  /*
    We may need to wait for another thread which is doing a resize
    already. This cannot happen in the MySQL server though. It allows
    not linked in the LRU ring.
*/

static void link_block(KEY_CACHE *keycache, BLOCK_LINK *block, my_bool hot,
                       my_bool at_end)
  BLOCK_LINK *ins;
  BLOCK_LINK **pins;

  DBUG_ASSERT((block->status & ~BLOCK_CHANGED) == (BLOCK_READ | BLOCK_IN_USE));
  DBUG_ASSERT(block->hash_link); /*backptr to block NULL from free_block()*/
  DBUG_ASSERT(!block->requests);
  DBUG_ASSERT(block->prev_changed && *block->prev_changed == block);
  DBUG_ASSERT(!block->next_used);
  DBUG_ASSERT(!block->prev_used);

  if (!hot && keycache->waiting_for_block.last_thread)
    /* Signal that in the LRU warm sub-chain an available block has appeared */
static void remove_reader(BLOCK_LINK *block)
  DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
  DBUG_ASSERT(block->hash_link && block->hash_link->block == block);
  DBUG_ASSERT(block->prev_changed && *block->prev_changed == block);
  DBUG_ASSERT(!block->next_used);
  DBUG_ASSERT(!block->prev_used);
  DBUG_ASSERT(block->hash_link->requests);

#ifdef THREAD
  if (! --block->hash_link->requests && block->condvar)
    keycache_pthread_cond_signal(block->condvar);
#else
  --block->hash_link->requests;
#endif
static void wait_for_readers(KEY_CACHE *keycache,
                             BLOCK_LINK *block)
  struct st_my_thread_var *thread= my_thread_var;
  DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
  DBUG_ASSERT(!(block->status & (BLOCK_ERROR | BLOCK_IN_FLUSH |
                                 BLOCK_CHANGED)));
  DBUG_ASSERT(block->hash_link);
  DBUG_ASSERT(block->hash_link->block == block);
  /* Linked in file_blocks or changed_blocks hash. */
  DBUG_ASSERT(block->prev_changed && *block->prev_changed == block);
  /* Not linked in LRU ring. */
  DBUG_ASSERT(!block->next_used);
  DBUG_ASSERT(!block->prev_used);
  while (block->hash_link->requests)
  {
    KEYCACHE_DBUG_PRINT("wait_for_readers: wait",
                        ("suspend thread %ld block %u",
                         thread->id, BLOCK_NUMBER(block)));
    /* There must be no other waiter. We have no queue here. */
    DBUG_ASSERT(!block->condvar);
    block->condvar= &thread->suspend;
    keycache_pthread_cond_wait(&thread->suspend, &keycache->cache_lock);
    block->condvar= NULL;
  }
  KEYCACHE_DBUG_ASSERT(block->hash_link->requests == 0);
    hash_link points to the first member of the list
  */
  hash_link= *(start= &keycache->hash_root[KEYCACHE_HASH(file, filepos)]);
#if defined(KEYCACHE_DEBUG)
  cnt= 0;
#endif
  /* Look for an element for the pair (file, filepos) in the bucket chain */
  while (hash_link &&
         (hash_link->diskpos != filepos || hash_link->file != file))
    hash_link= hash_link->next;
#if defined(KEYCACHE_DEBUG)
    if (! (cnt <= keycache->hash_links_used))
      for (i=0, hash_link= *start ;
           i < cnt ; i++, hash_link= hash_link->next)
        KEYCACHE_DBUG_PRINT("get_hash_link", ("fd: %u pos: %lu",
                            (uint) hash_link->file,(ulong) hash_link->diskpos));
    KEYCACHE_DBUG_ASSERT(cnt <= keycache->hash_links_used);
#endif
  if (! hash_link)
  int page_status;
  DBUG_ENTER("find_key_block");
  KEYCACHE_THREAD_TRACE("find_key_block:begin");
  DBUG_PRINT("enter", ("fd: %d pos: %lu wrmode: %d",
                       file, (ulong) filepos, wrmode));
  KEYCACHE_DBUG_PRINT("find_key_block", ("fd: %d pos: %lu wrmode: %d",
                                         file, (ulong) filepos,
                                         wrmode));
#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
  DBUG_EXECUTE("check_keycache2",
               test_key_cache(keycache, "start of find_key_block", 0););
#endif

  /*
    If the flush phase of a resize operation fails, the cache is left
    unusable. This will be detected only after "goto restart".
  */
  if (!keycache->can_be_used)

  /*
    Find the hash_link for the requested file block (file, filepos). We

      /* There are some never used blocks, take first of them */
      DBUG_ASSERT(keycache->blocks_used <
                  (ulong) keycache->disk_blocks);
      block= &keycache->block_root[keycache->blocks_used];
      block->buffer= ADD_TO_PTR(keycache->block_mem,
                                ((ulong) keycache->blocks_used*
                                 keycache->key_cache_block_size),
                                uchar*);
      keycache->blocks_used++;
      DBUG_ASSERT(!block->next_used);
      DBUG_ASSERT(!block->prev_used);
      DBUG_ASSERT(!block->next_changed);
      DBUG_ASSERT(!block->prev_changed);
      DBUG_ASSERT(!block->hash_link);
      DBUG_ASSERT(!block->status);
      DBUG_ASSERT(!block->requests);
      keycache->blocks_unused--;
      block->status= BLOCK_IN_USE;
      block->length= 0;
        link_into_queue(&keycache->waiting_for_block, thread);
        KEYCACHE_DBUG_PRINT("find_key_block: wait",
                            ("suspend thread %ld", thread->id));
        keycache_pthread_cond_wait(&thread->suspend,
                                   &keycache->cache_lock);
      while (thread->next);
      thread->opt_info= NULL;
      /* Assert that block has a request registered. */
      DBUG_ASSERT(hash_link->block->requests);
      /* Assert that block is not in LRU ring. */
      DBUG_ASSERT(!hash_link->block->next_used);
      DBUG_ASSERT(!hash_link->block->prev_used);
      KEYCACHE_DBUG_ASSERT(keycache->used_last);
      /*
        If we waited above, hash_link->block has been assigned by
        link_block(). Otherwise it is still NULL. In the latter case
      BLOCK_IN_EVICTION may be true or not. Other flags must
      have a fixed value.
    */
    DBUG_ASSERT((block->status & ~BLOCK_IN_EVICTION) ==
                (BLOCK_READ | BLOCK_IN_SWITCH |
                 BLOCK_IN_FLUSH | BLOCK_IN_FLUSHWRITE |
                 BLOCK_CHANGED | BLOCK_IN_USE));
    DBUG_ASSERT(block->hash_link);

    keycache_pthread_mutex_unlock(&keycache->cache_lock);
    /*
      The call is thread safe because only the current
      thread might change the block->hash_link value
    */
    error= my_pwrite(block->hash_link->file,
                     block->buffer+block->offset,
                     block->length - block->offset,
                     block->hash_link->diskpos+ block->offset,
                     MYF(MY_NABP | MY_WAIT_IF_FULL));
    keycache_pthread_mutex_lock(&keycache->cache_lock);

    /* Block status must not have changed. */
    DBUG_ASSERT((block->status & ~BLOCK_IN_EVICTION) ==
                (BLOCK_READ | BLOCK_IN_SWITCH |
                 BLOCK_IN_FLUSH | BLOCK_IN_FLUSHWRITE |
                 BLOCK_CHANGED | BLOCK_IN_USE) || fail_block(block));
    keycache->global_cache_write++;
      /*
        Register a request on the block. This is another protection
        against eviction.
      */
      DBUG_ASSERT(((block->hash_link != hash_link) &&
                   (block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH))) ||
                  ((block->hash_link == hash_link) &&
                   !(block->status & BLOCK_READ)) ||
                  ((block->status & BLOCK_READ) &&
                   !(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH))));
      reg_requests(keycache, block, 1);
      KEYCACHE_DBUG_PRINT("find_key_block",
                          ("block->hash_link: %p hash_link: %p "
                           "block->status: %u", block->hash_link,
                           hash_link, block->status ));
      page_status= (((block->hash_link == hash_link) &&
                     (block->status & BLOCK_READ)) ?
                    PAGE_READ : PAGE_WAIT_TO_BE_READ);

  KEYCACHE_DBUG_ASSERT(page_status != -1);
  /* Same assert basically, but be very sure. */
  KEYCACHE_DBUG_ASSERT(block);
  /* Assert that block has a request and is not in LRU ring. */
  DBUG_ASSERT(block->requests);
  DBUG_ASSERT(!block->next_used);
  DBUG_ASSERT(!block->prev_used);
  /* Assert that we return the correct block. */
  DBUG_ASSERT((page_status == PAGE_WAIT_TO_BE_READ) ||
              ((block->hash_link->file == file) &&
               (block->hash_link->diskpos == filepos)));
  *page_st=page_status;
  KEYCACHE_DBUG_PRINT("find_key_block",
                      ("fd: %d pos: %lu block->status: %u page_status: %d",
                       file, (ulong) filepos, block->status,
                       page_status));

#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
  DBUG_EXECUTE("check_keycache2",
               test_key_cache(keycache, "end of find_key_block",0););
#endif
  KEYCACHE_THREAD_TRACE("find_key_block:end");
    request for the block become secondary requests. For a primary
    request the block must be properly initialized.
  */
  DBUG_ASSERT(((block->status & ~BLOCK_FOR_UPDATE) == BLOCK_IN_USE) ||
              fail_block(block));
  DBUG_ASSERT((block->length == 0) || fail_block(block));
  DBUG_ASSERT((block->offset == keycache->key_cache_block_size) ||
              fail_block(block));
  DBUG_ASSERT((block->requests > 0) || fail_block(block));
  KEYCACHE_DBUG_PRINT("read_block",
                      ("page to be read by primary request"));

  keycache->global_cache_read++;
  /* Page is not in buffer yet, is to be read from disk */
      Here other threads may step in and register as secondary readers.
      They will register in block->wqueue[COND_FOR_REQUESTED].
    */
    got_length= my_pread(block->hash_link->file, block->buffer,
                         read_length, block->hash_link->diskpos, MYF(0));
    keycache_pthread_mutex_lock(&keycache->cache_lock);
    /*
      The block can now have been marked for free (in case of
      FLUSH_RELEASE). Otherwise the state must be unchanged.
    */
    DBUG_ASSERT(((block->status & ~(BLOCK_REASSIGNED |
                                    BLOCK_FOR_UPDATE)) == BLOCK_IN_USE) ||
                fail_block(block));
    DBUG_ASSERT((block->length == 0) || fail_block(block));
    DBUG_ASSERT((block->offset == keycache->key_cache_block_size) ||
                fail_block(block));
    DBUG_ASSERT((block->requests > 0) || fail_block(block));

    if (got_length < min_length)
      block->status|= BLOCK_ERROR;
    The function ensures that a block of data of size length from file
    positioned at filepos is in the buffers for some key cache blocks.
    Then the function either copies the data into the buffer buff, or,
    if return_buffer is TRUE, it just returns the pointer to the key cache
    buffer with the data.
    Filepos must be a multiple of 'block_length', but it doesn't
    have to be a multiple of key_cache_block_size;
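
    Illustrative use (a sketch, not from the original source; the file
    handle, buffer and sizes below are hypothetical):

      uchar page[1024];
      uchar *res= key_cache_read(keycache, file, 0, 3,
                                 page, sizeof(page), sizeof(page), 0);

    With return_buffer == 0 the data is copied into page; res is the
    destination buffer on success and 0 on error.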
uchar *key_cache_read(KEY_CACHE *keycache,
                      File file, my_off_t filepos, int level,
                      uchar *buff, uint length,
                      uint block_length __attribute__((unused)),
                      int return_buffer __attribute__((unused)))
  my_bool locked_and_incremented= FALSE;
  uchar *start= buff;
  DBUG_ENTER("key_cache_read");
  DBUG_PRINT("enter", ("fd: %u pos: %lu length: %u",
                       (uint) file, (ulong) filepos, length));

  if (keycache->key_cache_inited)
    /* Key cache is used */
    register BLOCK_LINK *block;
    uint read_length;
      /* The requested page is to be read into the block buffer */
      read_block(keycache, block,
                 keycache->key_cache_block_size, read_length+offset,
                 (my_bool)(page_st == PAGE_TO_BE_READ));
      /*
        A secondary request must now have the block assigned to the
        requested file block. It does not hurt to check it for
        primary requests too.
      */
      DBUG_ASSERT(keycache->can_be_used);
      DBUG_ASSERT(block->hash_link->file == file);
      DBUG_ASSERT(block->hash_link->diskpos == filepos);
      DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
    else if (block->length < read_length + offset)

    /* block status may have added BLOCK_ERROR in the above 'if'. */
    if (!((status= block->status) & BLOCK_ERROR))
      if (! return_buffer)
      DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
#if !defined(SERIALIZED_READ_FROM_CACHE)
      keycache_pthread_mutex_unlock(&keycache->cache_lock);
#endif
      /* Copy data from the cache buffer */
      if (!(read_length & 511))
        bmove512(buff, block->buffer+offset, read_length);
      else
        memcpy(buff, block->buffer+offset, (size_t) read_length);
#if !defined(SERIALIZED_READ_FROM_CACHE)
      keycache_pthread_mutex_lock(&keycache->cache_lock);
      DBUG_ASSERT(block->status & (BLOCK_READ | BLOCK_IN_USE));
#endif
int key_cache_insert(KEY_CACHE *keycache,
                     File file, my_off_t filepos, int level,
                     uchar *buff, uint length)
  DBUG_ENTER("key_cache_insert");
  DBUG_PRINT("enter", ("fd: %u pos: %lu length: %u",
                       (uint) file,(ulong) filepos, length));

  if (keycache->key_cache_inited)
    /* Key cache is used */
    register BLOCK_LINK *block;
    uint read_length;
    my_bool locked_and_incremented= FALSE;
    /*
      When the keycache is once initialized, we use the cache_lock to
    The function copies the data of size length from buff into buffers
    for key cache blocks that are assigned to contain the portion of
    the file starting with position filepos.
    It ensures that this data is flushed to the file if dont_write is FALSE.
    Filepos must be a multiple of 'block_length', but it doesn't
    have to be a multiple of key_cache_block_size;

    dont_write is always TRUE in the server (info->lock_type is never F_UNLCK).
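
    Illustrative use (a sketch, not from the original source; the file
    handle, buffer and position are hypothetical). With dont_write == 1
    the page is only stored in the cache and left dirty, to be written
    out later by the flush functions (flush_key_blocks_int() and friends):

      uchar page[1024];
      int error= key_cache_write(keycache, file, 0, 3,
                                 page, sizeof(page), sizeof(page), 1);

    error is 0 on success and non-zero otherwise.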
int key_cache_write(KEY_CACHE *keycache,
                    File file, my_off_t filepos, int level,
                    uchar *buff, uint length,
                    uint block_length __attribute__((unused)),
                    int dont_write)
  my_bool locked_and_incremented= FALSE;
  DBUG_ENTER("key_cache_write");
  DBUG_PRINT("enter",
             ("fd: %u pos: %lu length: %u block_length: %u"
              " key_block_length: %u",
              (uint) file, (ulong) filepos, length, block_length,
              keycache ? keycache->key_cache_block_size : 0));

  if (!dont_write)
    /* purecov: begin inspected */
    /* Not used in the server. */
    /* Force writing from buff into disk. */
    keycache->global_cache_w_requests++;
    keycache->global_cache_write++;
    if (my_pwrite(file, buff, length, filepos, MYF(MY_NABP | MY_WAIT_IF_FULL)))

#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
  DBUG_EXECUTE("check_keycache",
               test_key_cache(keycache, "start of key_cache_write", 1););
#endif

  if (keycache->key_cache_inited)
    /* Key cache is used */
    register BLOCK_LINK *block;
    uint read_length;
    is registered in the hash_link and free_block() will wait for it
  */
  DBUG_ASSERT((block->status & BLOCK_IN_USE) &&
              !(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
                                 BLOCK_REASSIGNED | BLOCK_IN_FLUSH |
                                 BLOCK_CHANGED | BLOCK_FOR_UPDATE)));
  /* Assert that the block is in a file_blocks chain. */
  DBUG_ASSERT(block->prev_changed && *block->prev_changed == block);
  /* Assert that the block is not in the LRU ring. */
  DBUG_ASSERT(!block->next_used && !block->prev_used);
  /*
    IMHO the below condition (if()) makes no sense. I can't see how it
    could be possible that free_block() is entered with a NULL hash_link
    pointer. The only place where it can become NULL is in free_block()
    (or before its first use ever, but for those blocks free_block() is
    not called). I don't remove the conditional as it cannot harm, but
    place a DBUG_ASSERT to confirm my hypothesis. Eventually the
    condition (if()) can be removed.
  */
  DBUG_ASSERT(block->hash_link && block->hash_link->block == block);
  if (block->hash_link)
      if (!(block->status & BLOCK_FOR_UPDATE))
        /* Blocks coming here must have a certain status. */
        DBUG_ASSERT(block->hash_link);
        DBUG_ASSERT(block->hash_link->block == block);
        DBUG_ASSERT(block->hash_link->file == file);
        DBUG_ASSERT((block->status & ~BLOCK_IN_EVICTION) ==
                    (BLOCK_READ | BLOCK_IN_FLUSH | BLOCK_CHANGED | BLOCK_IN_USE));
        block->status|= BLOCK_IN_FLUSHWRITE;
        keycache_pthread_mutex_unlock(&keycache->cache_lock);
        error= my_pwrite(file,
                         block->buffer+block->offset,
                         block->length - block->offset,
                         block->hash_link->diskpos+ block->offset,
                         MYF(MY_NABP | MY_WAIT_IF_FULL));
        keycache_pthread_mutex_lock(&keycache->cache_lock);
        keycache->global_cache_write++;
static int flush_key_blocks_int(KEY_CACHE *keycache,
                                File file, enum flush_type type)
  BLOCK_LINK *cache_buff[FLUSH_CACHE],**cache;
  int last_errno= 0;
  int last_errcnt= 0;
  DBUG_ENTER("flush_key_blocks_int");
  DBUG_PRINT("enter",("file: %d blocks_used: %lu blocks_changed: %lu",
                      file, keycache->blocks_used, keycache->blocks_changed));

#if !defined(DBUG_OFF) && defined(EXTRA_DEBUG)
  DBUG_EXECUTE("check_keycache",
               test_key_cache(keycache, "start of flush_key_blocks", 0););
#endif

  cache= cache_buff;
  if (keycache->disk_blocks > 0 &&
  SYNOPSIS
    reset_key_cache_counters()
    name       the name of a key cache
    key_cache  pointer to the key cache to be reset

  DESCRIPTION
    This procedure is used by process_key_caches() to reset the counters of
    all currently used key caches, both the default one and the named ones.

  RETURN
    0 on success (always because it can't fail)
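
  EXAMPLE
    An illustrative call (a sketch, not from the original source), using
    the default cache object referenced elsewhere in this file:

      reset_key_cache_counters("default", dflt_key_cache);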
int reset_key_cache_counters(const char *name __attribute__((unused)),
                             KEY_CACHE *key_cache)
  DBUG_ENTER("reset_key_cache_counters");
  if (!key_cache->key_cache_inited)
    DBUG_PRINT("info", ("Key cache %s not initialized.", name));

  DBUG_PRINT("info", ("Resetting counters for key cache %s.", name));
  key_cache->global_blocks_changed= 0;   /* Key_blocks_not_flushed */
  key_cache->global_cache_r_requests= 0; /* Key_read_requests */
  key_cache->global_cache_read= 0;       /* Key_reads */
  key_cache->global_cache_w_requests= 0; /* Key_write_requests */
  key_cache->global_cache_write= 0;      /* Key_writes */
/*
  Test if disk-cache is ok
*/
static void test_key_cache(KEY_CACHE *keycache __attribute__((unused)),
                           const char *where __attribute__((unused)),
                           my_bool lock __attribute__((unused)))

#if defined(KEYCACHE_TIMEOUT)

#define KEYCACHE_DUMP_FILE  "keycache_dump.txt"
#define MAX_QUEUE_LEN  100
    block= &keycache->block_root[i];
    hash_link= block->hash_link;
    fprintf(keycache_dump_file,
            "block:%u hash_link:%d status:%x #requests=%u waiting_for_readers:%d\n",
            i, (int) (hash_link ? HASH_LINK_NUMBER(hash_link) : -1),
            block->status, block->requests, block->condvar ? 1 : 0);
    for (j=0 ; j < 2; j++)
      KEYCACHE_WQUEUE *wqueue=&block->wqueue[j];
    1 microsecond = 1000 nanoseconds
  */
  timeout.tv_nsec= now.tv_usec * 1000;
  KEYCACHE_THREAD_TRACE_END("started waiting");
#if defined(KEYCACHE_DEBUG)
  fprintf(keycache_debug_log, "waiting...\n");
  fflush(keycache_debug_log);
#endif
  rc= pthread_cond_timedwait(cond, mutex, &timeout);
  KEYCACHE_THREAD_TRACE_BEGIN("finished waiting");
  if (rc == ETIMEDOUT || rc == ETIME)
#if defined(KEYCACHE_DEBUG)
    fprintf(keycache_debug_log,"aborted by keycache timeout\n");
    fclose(keycache_debug_log);
#endif
    keycache_dump();

#if defined(KEYCACHE_DEBUG)
  KEYCACHE_DBUG_ASSERT(rc != ETIMEDOUT);
#else
  assert(rc != ETIMEDOUT);
#endif

#if defined(KEYCACHE_DEBUG)
static int keycache_pthread_cond_wait(pthread_cond_t *cond,
                                      pthread_mutex_t *mutex)
  KEYCACHE_THREAD_TRACE_END("started waiting");
  rc= pthread_cond_wait(cond, mutex);
  KEYCACHE_THREAD_TRACE_BEGIN("finished waiting");

#endif /* defined(KEYCACHE_TIMEOUT) */
#if defined(KEYCACHE_DEBUG)

static int keycache_pthread_mutex_lock(pthread_mutex_t *mutex)
  rc= pthread_mutex_lock(mutex);
  KEYCACHE_THREAD_TRACE_BEGIN("");

static void keycache_pthread_mutex_unlock(pthread_mutex_t *mutex)
  KEYCACHE_THREAD_TRACE_END("");
  pthread_mutex_unlock(mutex);

static int keycache_pthread_cond_signal(pthread_cond_t *cond)
  KEYCACHE_THREAD_TRACE("signal");
  rc= pthread_cond_signal(cond);

#if defined(KEYCACHE_DEBUG_LOG)

static void keycache_debug_print(const char * fmt,...)
  if (keycache_debug_log)
    VOID(vfprintf(keycache_debug_log, fmt, args));
    VOID(fputc('\n',keycache_debug_log));

#endif /* defined(KEYCACHE_DEBUG_LOG) */

#if defined(KEYCACHE_DEBUG_LOG)

void keycache_debug_log_close(void)
  if (keycache_debug_log)
    fclose(keycache_debug_log);

#endif /* defined(KEYCACHE_DEBUG_LOG) */

#endif /* defined(KEYCACHE_DEBUG) */
#if !defined(DBUG_OFF)
#define F_B_PRT(_f_, _v_) DBUG_PRINT("assert_fail", (_f_, _v_))

static int fail_block(BLOCK_LINK *block)
  F_B_PRT("block->next_used: %lx\n", (ulong) block->next_used);
  F_B_PRT("block->prev_used: %lx\n", (ulong) block->prev_used);
  F_B_PRT("block->next_changed: %lx\n", (ulong) block->next_changed);
  F_B_PRT("block->prev_changed: %lx\n", (ulong) block->prev_changed);
  F_B_PRT("block->hash_link: %lx\n", (ulong) block->hash_link);
  F_B_PRT("block->status: %u\n", block->status);
  F_B_PRT("block->length: %u\n", block->length);
  F_B_PRT("block->offset: %u\n", block->offset);
  F_B_PRT("block->requests: %u\n", block->requests);
  F_B_PRT("block->temperature: %u\n", block->temperature);
  return 0; /* Let the assert fail. */

static int fail_hlink(HASH_LINK *hlink)
  F_B_PRT("hlink->next: %lx\n", (ulong) hlink->next);
  F_B_PRT("hlink->prev: %lx\n", (ulong) hlink->prev);
  F_B_PRT("hlink->block: %lx\n", (ulong) hlink->block);
  F_B_PRT("hlink->diskpos: %lu\n", (ulong) hlink->diskpos);
  F_B_PRT("hlink->file: %d\n", hlink->file);
  return 0; /* Let the assert fail. */

static int cache_empty(KEY_CACHE *keycache)
  if (keycache->disk_blocks <= 0)
  for (idx= 0; idx < keycache->disk_blocks; idx++)
    BLOCK_LINK *block= keycache->block_root + idx;
    if (block->status || block->requests || block->hash_link)
      fprintf(stderr, "block index: %u\n", idx);
  for (idx= 0; idx < keycache->hash_links; idx++)
    HASH_LINK *hash_link= keycache->hash_link_root + idx;
    if (hash_link->requests || hash_link->block)
      fprintf(stderr, "hash_link index: %u\n", idx);
      fail_hlink(hash_link);
  fprintf(stderr, "blocks: %d used: %lu\n",
          keycache->disk_blocks, keycache->blocks_used);
  fprintf(stderr, "hash_links: %d used: %d\n",
          keycache->hash_links, keycache->hash_links_used);
  fprintf(stderr, "\n");