246
266
size_t use_mem, uint32_t division_limit,
247
267
uint32_t age_threshold)
250
(void)key_cache_block_size;
252
(void)division_limit;
254
memset(keycache, 0, sizeof(KEY_CACHE));
269
uint32_t blocks, hash_links;
272
assert(key_cache_block_size >= 512);
274
if (keycache->key_cache_inited && keycache->disk_blocks > 0)
279
keycache->global_cache_w_requests= keycache->global_cache_r_requests= 0;
280
keycache->global_cache_read= keycache->global_cache_write= 0;
281
keycache->disk_blocks= -1;
282
if (! keycache->key_cache_inited)
284
keycache->key_cache_inited= 1;
286
Initialize these variables once only.
287
Their value must survive re-initialization during resizing.
289
keycache->in_resize= 0;
290
keycache->resize_in_flush= 0;
291
keycache->cnt_for_resize_op= 0;
292
keycache->waiting_for_resize_cnt.last_thread= NULL;
293
keycache->in_init= 0;
294
pthread_mutex_init(&keycache->cache_lock, MY_MUTEX_INIT_FAST);
295
keycache->resize_queue.last_thread= NULL;
298
keycache->key_cache_mem_size= use_mem;
299
keycache->key_cache_block_size= key_cache_block_size;
301
blocks= (uint32_t) (use_mem / (sizeof(BLOCK_LINK) + 2 * sizeof(HASH_LINK) +
302
sizeof(HASH_LINK*) * 5/4 + key_cache_block_size));
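/*
  Worked example of the sizing formula above (illustrative figures only;
  the real result depends on sizeof(BLOCK_LINK) and sizeof(HASH_LINK)).
  With use_mem = 8 MB, key_cache_block_size = 1024 and an assumed
  per-block overhead of

    sizeof(BLOCK_LINK) + 2*sizeof(HASH_LINK) + sizeof(HASH_LINK*)*5/4 = 200

  bytes, we get

    blocks = 8388608 / (200 + 1024) = 6853

  i.e. slightly fewer blocks than use_mem / key_cache_block_size, because
  every cache page also pays for its bookkeeping structures.
*/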
303
/* It doesn't make sense to have too few blocks (less than 8) */
308
/* Set hash_entries to the next bigger power of 2 */
309
if ((keycache->hash_entries= next_power(blocks)) < blocks * 5/4)
310
keycache->hash_entries<<= 1;
311
hash_links= 2 * blocks;
312
#if defined(MAX_THREADS)
313
if (hash_links < MAX_THREADS + blocks - 1)
314
hash_links= MAX_THREADS + blocks - 1;
316
while ((length= (ALIGN_SIZE(blocks * sizeof(BLOCK_LINK)) +
317
ALIGN_SIZE(hash_links * sizeof(HASH_LINK)) +
318
ALIGN_SIZE(sizeof(HASH_LINK*) *
319
keycache->hash_entries))) +
320
((size_t) blocks * keycache->key_cache_block_size) > use_mem)
322
/* Allocate memory for cache page buffers */
323
if ((keycache->block_mem= malloc((size_t) blocks * keycache->key_cache_block_size)))
326
Allocate memory for blocks, hash_links and hash entries;
327
For each block 2 hash links are allocated
329
if ((keycache->block_root= (BLOCK_LINK*) my_malloc(length,
332
free(keycache->block_mem);
333
keycache->block_mem= 0;
338
my_error(EE_OUTOFMEMORY, MYF(0), blocks * keycache->key_cache_block_size);
341
blocks= blocks / 4*3;
343
keycache->blocks_unused= blocks;
344
keycache->disk_blocks= (int) blocks;
345
keycache->hash_links= hash_links;
346
keycache->hash_root= (HASH_LINK**) ((char*) keycache->block_root +
347
ALIGN_SIZE(blocks*sizeof(BLOCK_LINK)));
348
keycache->hash_link_root= (HASH_LINK*) ((char*) keycache->hash_root +
349
ALIGN_SIZE((sizeof(HASH_LINK*) *
350
keycache->hash_entries)));
351
memset(keycache->block_root, 0,
352
keycache->disk_blocks * sizeof(BLOCK_LINK));
353
memset(keycache->hash_root, 0,
354
keycache->hash_entries * sizeof(HASH_LINK*));
355
memset(keycache->hash_link_root, 0,
356
keycache->hash_links * sizeof(HASH_LINK));
357
keycache->hash_links_used= 0;
358
keycache->free_hash_list= NULL;
359
keycache->blocks_used= keycache->blocks_changed= 0;
361
keycache->global_blocks_changed= 0;
362
keycache->blocks_available=0; /* For debugging */
364
/* The LRU chain is empty after initialization */
365
keycache->used_last= NULL;
366
keycache->used_ins= NULL;
367
keycache->free_block_list= NULL;
368
keycache->keycache_time= 0;
369
keycache->warm_blocks= 0;
370
keycache->min_warm_blocks= (division_limit ?
371
blocks * division_limit / 100 + 1 :
373
keycache->age_threshold= (age_threshold ?
374
blocks * age_threshold / 100 :
377
keycache->can_be_used= 1;
379
keycache->waiting_for_hash_link.last_thread= NULL;
380
keycache->waiting_for_block.last_thread= NULL;
381
memset(keycache->changed_blocks, 0,
382
sizeof(keycache->changed_blocks[0]) * CHANGED_BLOCKS_HASH);
383
memset(keycache->file_blocks, 0,
384
sizeof(keycache->file_blocks[0]) * CHANGED_BLOCKS_HASH);
388
/* key_buffer_size is specified too small. Disable the cache. */
389
keycache->can_be_used= 0;
392
keycache->blocks= keycache->disk_blocks > 0 ? keycache->disk_blocks : 0;
393
return((int) keycache->disk_blocks);
397
keycache->disk_blocks= 0;
399
if (keycache->block_mem)
401
free(keycache->block_mem);
402
keycache->block_mem= NULL;
404
if (keycache->block_root)
406
free((unsigned char*) keycache->block_root);
407
keycache->block_root= NULL;
410
keycache->can_be_used= 0;
420
keycache pointer to a key cache data structure
421
key_cache_block_size size of blocks to keep cached data
422
use_mem total memory to use for the new key cache
423
division_limit new division limit (if not zero)
424
age_threshold new age threshold (if not zero)
number of blocks in the key cache, if successful,
The function first compares the memory size and the block size parameters
with the key cache values.
If they differ the function frees the memory allocated for the
old key cache blocks by calling the end_key_cache function and
then rebuilds the key cache with new blocks by calling
The function starts the operation only when all other threads
performing operations with the key cache let it proceed
(when cnt_for_resize_op is 0).
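/*
  Illustrative caller sketch (an assumption, not code from this file):
  applying a new key buffer size could look like this, with
  resize_key_cache() doing the flush, the rebuild and reporting the new
  block count.  The helper name is hypothetical.

    static int apply_new_key_buffer_size(KEY_CACHE *kc, size_t new_size)
    {
      int blocks= resize_key_cache(kc, kc->key_cache_block_size,
                                   new_size, 0, 0);
      return (blocks > 0) ? 0 : 1;   /* 0 blocks: cache ends up disabled */
    }

  A zero division_limit / age_threshold counts as "not specified", as in
  the parameter notes above.
*/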
444
int resize_key_cache(KEY_CACHE *keycache, uint32_t key_cache_block_size,
445
size_t use_mem, uint32_t division_limit,
446
uint32_t age_threshold)
450
if (!keycache->key_cache_inited)
451
return(keycache->disk_blocks);
453
if(key_cache_block_size == keycache->key_cache_block_size &&
454
use_mem == keycache->key_cache_mem_size)
456
change_key_cache_param(keycache, division_limit, age_threshold);
457
return(keycache->disk_blocks);
460
keycache_pthread_mutex_lock(&keycache->cache_lock);
463
We may need to wait for another thread which is doing a resize
464
already. This cannot happen in the MySQL server though. It allows
465
one resizer only. In set_var.cc keycache->in_init is used to block
468
while (keycache->in_resize)
470
/* purecov: begin inspected */
471
wait_on_queue(&keycache->resize_queue, &keycache->cache_lock);
476
Mark the operation in progress. This blocks other threads from doing
477
a resize in parallel. It prohibits new blocks to enter the cache.
478
Read/write requests can bypass the cache during the flush phase.
480
keycache->in_resize= 1;
482
/* Need to flush only if keycache is enabled. */
483
if (keycache->can_be_used)
485
/* Start the flush phase. */
486
keycache->resize_in_flush= 1;
488
if (flush_all_key_blocks(keycache))
490
/* TODO: if this happens, we should write a warning in the log file ! */
491
keycache->resize_in_flush= 0;
493
keycache->can_be_used= 0;
497
/* End the flush phase. */
498
keycache->resize_in_flush= 0;
502
Some direct read/write operations (bypassing the cache) may still be
503
unfinished. Wait until they are done. If the key cache can be used,
504
direct I/O is done in increments of key_cache_block_size. That is,
505
every block is checked if it is in the cache. We need to wait for
506
pending I/O before re-initializing the cache, because we may change
507
the block size. Otherwise they could check for blocks at file
508
positions where the new block division has none. We do also want to
509
wait for I/O done when (if) the cache was disabled. It must not
510
run in parallel with normal cache operation.
512
while (keycache->cnt_for_resize_op)
513
wait_on_queue(&keycache->waiting_for_resize_cnt, &keycache->cache_lock);
516
Free old cache structures, allocate new structures, and initialize
517
them. Note that the cache_lock mutex and the resize_queue are left
518
untouched. We do not lose the cache_lock and will release it only at
519
the end of this function.
521
end_key_cache(keycache, 0); /* Don't free mutex */
522
/* The following will work even if use_mem is 0 */
523
blocks= init_key_cache(keycache, key_cache_block_size, use_mem,
524
division_limit, age_threshold);
528
Mark the resize finished. This allows other threads to start a
529
resize or to request new cache blocks.
531
keycache->in_resize= 0;
533
/* Signal waiting threads. */
534
release_whole_queue(&keycache->resize_queue);
536
keycache_pthread_mutex_unlock(&keycache->cache_lock);
Increment counter blocking resize key cache operation
static inline void inc_counter_for_resize_op(KEY_CACHE *keycache)
keycache->cnt_for_resize_op++;
Decrement counter blocking resize key cache operation;
Signal the operation to proceed when the counter becomes equal to zero
static inline void dec_counter_for_resize_op(KEY_CACHE *keycache)
if (!--keycache->cnt_for_resize_op)
release_whole_queue(&keycache->waiting_for_resize_cnt);
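/*
  Usage sketch (condensed from the cache I/O entry points later in this
  file): direct, cache-bypassing file I/O is bracketed by these counters,
  manipulated under cache_lock, so that a concurrent resize waiting on
  waiting_for_resize_cnt cannot rebuild the cache underneath the I/O.

    keycache_pthread_mutex_lock(&keycache->cache_lock);
    inc_counter_for_resize_op(keycache);
    keycache_pthread_mutex_unlock(&keycache->cache_lock);

    ... direct pread()/pwrite() on the file ...

    keycache_pthread_mutex_lock(&keycache->cache_lock);
    dec_counter_for_resize_op(keycache);
    keycache_pthread_mutex_unlock(&keycache->cache_lock);
*/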
561
Change the key cache parameters
564
change_key_cache_param()
565
keycache pointer to a key cache data structure
566
division_limit new division limit (if not zero)
567
age_threshold new age threshold (if not zero)
573
Presently the function resets the key cache parameters
574
concerning midpoint insertion strategy - division_limit and
578
void change_key_cache_param(KEY_CACHE *keycache, uint32_t division_limit,
579
uint32_t age_threshold)
581
keycache_pthread_mutex_lock(&keycache->cache_lock);
583
keycache->min_warm_blocks= (keycache->disk_blocks *
584
division_limit / 100 + 1);
586
keycache->age_threshold= (keycache->disk_blocks *
587
age_threshold / 100);
588
keycache_pthread_mutex_unlock(&keycache->cache_lock);
272
605
void end_key_cache(KEY_CACHE *keycache, bool cleanup)
607
if (!keycache->key_cache_inited)
610
if (keycache->disk_blocks > 0)
612
if (keycache->block_mem)
614
free(keycache->block_mem);
615
keycache->block_mem= NULL;
616
free((unsigned char*) keycache->block_root);
617
keycache->block_root= NULL;
619
keycache->disk_blocks= -1;
620
/* Reset blocks_changed to be safe if flush_all_key_blocks is called */
621
keycache->blocks_changed= 0;
626
pthread_mutex_destroy(&keycache->cache_lock);
627
keycache->key_cache_inited= keycache->can_be_used= 0;
276
630
} /* end_key_cache */
Link a thread into a double-linked queue of waiting threads.
wqueue pointer to the queue structure
thread pointer to the thread to be added to the queue
The queue is represented by a circular list of thread structures.
The list is double-linked of the type (**prev,*next), accessed by
a pointer to the last element.
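/*
  Illustration (not part of the original code): the queue is threaded
  through st_my_thread_var::next / ::prev, where 'prev' holds the address
  of the predecessor's 'next' field.  So for every linked thread

    *thread->prev == thread

  holds, a single waiter points at itself

    thread->next= thread;
    thread->prev= &thread->next;

  and wqueue->last_thread->next is the oldest waiter (the 'first_thread'
  that the wake-up loops below start from).
*/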
650
static void link_into_queue(KEYCACHE_WQUEUE *wqueue,
651
struct st_my_thread_var *thread)
653
struct st_my_thread_var *last;
655
assert(!thread->next && !thread->prev);
656
if (! (last= wqueue->last_thread))
659
thread->next= thread;
660
thread->prev= &thread->next;
664
thread->prev= last->next->prev;
665
last->next->prev= &thread->next;
666
thread->next= last->next;
669
wqueue->last_thread= thread;
673
Unlink a thread from double-linked queue of waiting threads
677
wqueue pointer to the queue structure
678
thread pointer to the thread to be removed from the queue
684
See NOTES for link_into_queue
687
static void unlink_from_queue(KEYCACHE_WQUEUE *wqueue,
688
struct st_my_thread_var *thread)
690
assert(thread->next && thread->prev);
691
if (thread->next == thread)
692
/* The queue contains only one member */
693
wqueue->last_thread= NULL;
696
thread->next->prev= thread->prev;
697
*thread->prev=thread->next;
698
if (wqueue->last_thread == thread)
699
wqueue->last_thread= STRUCT_PTR(struct st_my_thread_var, next,
708
Add a thread to single-linked queue of waiting threads
712
wqueue Pointer to the queue structure.
713
mutex Cache_lock to acquire after awake.
719
Queue is represented by a circular list of the thread structures
720
The list is single-linked of the type (*next), accessed by a pointer
723
The function protects against stray signals by verifying that the
724
current thread is unlinked from the queue when awaking. However,
725
since several threads can wait for the same event, it might be
726
necessary for the caller of the function to check again if the
727
condition for awake is indeed matched.
730
static void wait_on_queue(KEYCACHE_WQUEUE *wqueue,
731
pthread_mutex_t *mutex)
733
struct st_my_thread_var *last;
734
struct st_my_thread_var *thread= my_thread_var;
737
assert(!thread->next);
738
assert(!thread->prev); /* Not required, but must be true anyway. */
739
if (! (last= wqueue->last_thread))
740
thread->next= thread;
743
thread->next= last->next;
746
wqueue->last_thread= thread;
749
Wait until thread is removed from queue by the signalling thread.
750
The loop protects against stray signals.
754
keycache_pthread_cond_wait(&thread->suspend, mutex);
756
while (thread->next);
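/*
  Caller-side sketch (this mirrors the way the function is used further
  down in this file): since several waiters may be woken for one event,
  the awaited condition is always re-checked in a loop around the wait.

    while (block->status & BLOCK_IN_FLUSH)
      wait_on_queue(&block->wqueue[COND_FOR_SAVED], &keycache->cache_lock);
*/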
761
Remove all threads from queue signaling them to proceed
764
release_whole_queue()
765
wqueue pointer to the queue structure
771
See notes for wait_on_queue().
772
When removed from the queue each thread is signaled via condition
773
variable thread->suspend.
776
static void release_whole_queue(KEYCACHE_WQUEUE *wqueue)
778
struct st_my_thread_var *last;
779
struct st_my_thread_var *next;
780
struct st_my_thread_var *thread;
782
/* Queue may be empty. */
783
if (!(last= wqueue->last_thread))
790
/* Signal the thread. */
791
keycache_pthread_cond_signal(&thread->suspend);
792
/* Take thread from queue. */
796
while (thread != last);
798
/* Now queue is definitely empty. */
799
wqueue->last_thread= NULL;
804
Unlink a block from the chain of dirty/clean blocks
806
static inline void unlink_changed(BLOCK_LINK *block)
808
assert(block->prev_changed && *block->prev_changed == block);
809
if (block->next_changed)
810
block->next_changed->prev_changed= block->prev_changed;
811
*block->prev_changed= block->next_changed;
812
block->next_changed= NULL;
813
block->prev_changed= NULL;
818
Link a block into the chain of dirty/clean blocks
821
static inline void link_changed(BLOCK_LINK *block, BLOCK_LINK **phead)
823
assert(!block->next_changed);
824
assert(!block->prev_changed);
825
block->prev_changed= phead;
826
if ((block->next_changed= *phead))
827
(*phead)->prev_changed= &block->next_changed;
833
Link a block in a chain of clean blocks of a file.
837
keycache Key cache handle
838
block Block to relink
839
file File to be linked to
840
unlink If to unlink first
843
Unlink a block from whichever chain it is linked in, if it's
844
asked for, and link it to the chain of clean blocks of the
Never set/clear BLOCK_CHANGED outside of
link_to_file_list() or link_to_changed_list().
Otherwise you risk damaging the correct counting of changed blocks
and finding blocks in the wrong hash.
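/*
  Summary sketch (restating the structures used below, no new behaviour):
  every block in use sits on exactly one of two per-file chains, selected
  by its BLOCK_CHANGED flag:

    keycache->file_blocks[FILE_HASH(file)]      clean blocks of the file
    keycache->changed_blocks[FILE_HASH(file)]   dirty (changed) blocks

  which is why the flag may only be toggled together with the relinking
  performed by link_to_file_list() / link_to_changed_list().
*/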
857
static void link_to_file_list(KEY_CACHE *keycache,
858
BLOCK_LINK *block, int file,
861
assert(block->status & BLOCK_IN_USE);
862
assert(block->hash_link && block->hash_link->block == block);
863
assert(block->hash_link->file == file);
865
unlink_changed(block);
866
link_changed(block, &keycache->file_blocks[FILE_HASH(file)]);
867
if (block->status & BLOCK_CHANGED)
869
block->status&= ~BLOCK_CHANGED;
870
keycache->blocks_changed--;
871
keycache->global_blocks_changed--;
877
Re-link a block from the clean chain to the dirty chain of a file.
880
link_to_changed_list()
881
keycache key cache handle
882
block block to relink
885
Unlink a block from the chain of clean blocks of a file
886
and link it to the chain of dirty blocks of the same file.
Never set/clear BLOCK_CHANGED outside of
link_to_file_list() or link_to_changed_list().
Otherwise you risk damaging the correct counting of changed blocks
and finding blocks in the wrong hash.
898
static void link_to_changed_list(KEY_CACHE *keycache,
901
assert(block->status & BLOCK_IN_USE);
902
assert(!(block->status & BLOCK_CHANGED));
903
assert(block->hash_link && block->hash_link->block == block);
905
unlink_changed(block);
907
&keycache->changed_blocks[FILE_HASH(block->hash_link->file)]);
908
block->status|=BLOCK_CHANGED;
909
keycache->blocks_changed++;
910
keycache->global_blocks_changed++;
915
Link a block to the LRU chain at the beginning or at the end of
920
keycache pointer to a key cache data structure
921
block pointer to the block to link to the LRU chain
922
hot <-> to link the block into the hot subchain
923
at_end <-> to link the block at the end of the subchain
929
The LRU ring is represented by a circular list of block structures.
930
The list is double-linked of the type (**prev,*next) type.
931
The LRU ring is divided into two parts - hot and warm.
932
There are two pointers to access the last blocks of these two
933
parts. The beginning of the warm part follows right after the
935
Only blocks of the warm part can be used for eviction.
936
The first block from the beginning of this subchain is always
937
taken for eviction (keycache->used_last->next_used)
939
LRU chain: +------+ H O T +------+
940
+----| end |----...<----| beg |----+
941
| +------+last +------+ |
942
v<-link in latest hot (new end) |
943
| link in latest warm (new end)->^
944
| +------+ W A R M +------+ |
945
+----| beg |---->...----| end |----+
949
It is also possible that the block is selected for eviction and thus
950
not linked in the LRU ring.
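/*
  Sketch of how the 'hot' argument is chosen (taken from unreg_request()
  further down): a block is linked into the hot sub-chain only after its
  initial hits_left budget is used up and the warm part is large enough.

    hot= !block->hits_left && at_end &&
         keycache->warm_blocks > keycache->min_warm_blocks;
    link_block(keycache, block, hot, (bool)at_end);
*/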
953
static void link_block(KEY_CACHE *keycache, BLOCK_LINK *block, bool hot,
959
assert((block->status & ~BLOCK_CHANGED) == (BLOCK_READ | BLOCK_IN_USE));
960
assert(block->hash_link); /*backptr to block NULL from free_block()*/
961
assert(!block->requests);
962
assert(block->prev_changed && *block->prev_changed == block);
963
assert(!block->next_used);
964
assert(!block->prev_used);
965
if (!hot && keycache->waiting_for_block.last_thread)
967
/* Signal that in the LRU warm sub-chain an available block has appeared */
968
struct st_my_thread_var *last_thread=
969
keycache->waiting_for_block.last_thread;
970
struct st_my_thread_var *first_thread= last_thread->next;
971
struct st_my_thread_var *next_thread= first_thread;
972
HASH_LINK *hash_link= (HASH_LINK *) first_thread->opt_info;
973
struct st_my_thread_var *thread;
977
next_thread= thread->next;
979
We notify about the event all threads that ask
980
for the same page as the first thread in the queue
982
if ((HASH_LINK *) thread->opt_info == hash_link)
984
keycache_pthread_cond_signal(&thread->suspend);
985
unlink_from_queue(&keycache->waiting_for_block, thread);
989
while (thread != last_thread);
990
hash_link->block= block;
992
NOTE: We assigned the block to the hash_link and signalled the
993
requesting thread(s). But it is possible that other threads run
994
first. These threads see the hash_link assigned to a block which
995
is assigned to another hash_link and not marked BLOCK_IN_SWITCH.
996
This can be a problem for functions that do not select the block
997
via its hash_link: flush and free. They only see a block which
998
is in a "normal" state and don't know that it will be evicted soon.
1000
We cannot set BLOCK_IN_SWITCH here because only one of the
1001
requesting threads must handle the eviction. All others must wait
1002
for it to complete. If we set the flag here, the threads would not
1003
know who is in charge of the eviction. Without the flag, the first
1004
thread takes the stick and sets the flag.
1006
But we need to note in the block that it has been selected for
1007
eviction. It must not be freed. The evicting thread will not
1008
expect the block in the free list. Before freeing we could also
1009
check if block->requests > 1. But I think including another flag
1010
in the check of block->status is slightly more efficient and
1011
probably easier to read.
1013
block->status|= BLOCK_IN_EVICTION;
1016
pins= hot ? &keycache->used_ins : &keycache->used_last;
1020
ins->next_used->prev_used= &block->next_used;
1021
block->next_used= ins->next_used;
1022
block->prev_used= &ins->next_used;
1023
ins->next_used= block;
1029
/* The LRU ring is empty. Let the block point to itself. */
1030
keycache->used_last= keycache->used_ins= block->next_used= block;
1031
block->prev_used= &block->next_used;
1037
Unlink a block from the LRU chain
1041
keycache pointer to a key cache data structure
1042
block pointer to the block to unlink from the LRU chain
1048
See NOTES for link_block
1051
static void unlink_block(KEY_CACHE *keycache, BLOCK_LINK *block)
1053
assert((block->status & ~BLOCK_CHANGED) == (BLOCK_READ | BLOCK_IN_USE));
1054
assert(block->hash_link); /*backptr to block NULL from free_block()*/
1055
assert(!block->requests);
1056
assert(block->prev_changed && *block->prev_changed == block);
1057
assert(block->next_used && block->prev_used &&
1058
(block->next_used->prev_used == &block->next_used) &&
1059
(*block->prev_used == block));
1060
if (block->next_used == block)
1061
/* The list contains only one member */
1062
keycache->used_last= keycache->used_ins= NULL;
1065
block->next_used->prev_used= block->prev_used;
1066
*block->prev_used= block->next_used;
1067
if (keycache->used_last == block)
1068
keycache->used_last= STRUCT_PTR(BLOCK_LINK, next_used, block->prev_used);
1069
if (keycache->used_ins == block)
1070
keycache->used_ins=STRUCT_PTR(BLOCK_LINK, next_used, block->prev_used);
1072
block->next_used= NULL;
1073
block->prev_used= NULL;
1078
Register requests for a block.
1082
keycache Pointer to a key cache data structure.
1083
block Pointer to the block to register a request on.
1084
count Number of requests. Always 1.
1087
The first request unlinks the block from the LRU ring. This means
1088
that it is protected against eviction.
1093
static void reg_requests(KEY_CACHE *keycache, BLOCK_LINK *block, int count)
1095
assert(block->status & BLOCK_IN_USE);
1096
assert(block->hash_link);
1098
if (!block->requests)
1099
unlink_block(keycache, block);
1100
block->requests+=count;
1105
Unregister request for a block
1106
linking it to the LRU chain if it's the last request
1110
keycache pointer to a key cache data structure
1111
block pointer to the block to link to the LRU chain
1112
at_end <-> to link the block at the end of the LRU chain
1118
Every linking to the LRU ring decrements by one a special block
1119
counter (if it's positive). If the at_end parameter is true the block is
1120
added either at the end of warm sub-chain or at the end of hot sub-chain.
1121
It is added to the hot subchain if its counter is zero and number of
1122
blocks in warm sub-chain is not less than some low limit (determined by
1123
the division_limit parameter). Otherwise the block is added to the warm
1124
sub-chain. If the at_end parameter is false the block is always added
1125
at beginning of the warm sub-chain.
1126
Thus a warm block can be promoted to the hot sub-chain when its counter
1127
becomes zero for the first time.
1128
At the same time the block at the very beginning of the hot subchain
1129
might be moved to the beginning of the warm subchain if it stays untouched
1130
for a too long time (this time is determined by parameter age_threshold).
1132
It is also possible that the block is selected for eviction and thus
1133
not linked in the LRU ring.
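/*
  Worked example (illustrative numbers only): with disk_blocks = 1000,
  division_limit = 10 and age_threshold = 300 the formulas used in
  init_key_cache() / change_key_cache_param() give

    min_warm_blocks = 1000 * 10 / 100 + 1 = 101
    age_threshold   = 1000 * 300 / 100    = 3000

  so a warm block can be promoted to the hot sub-chain only while more
  than 101 blocks remain in the warm part, and a hot block that has not
  been hit for more than 3000 keycache_time ticks is demoted again.
*/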
1136
static void unreg_request(KEY_CACHE *keycache,
1137
BLOCK_LINK *block, int at_end)
1139
assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
1140
assert(block->hash_link); /*backptr to block NULL from free_block()*/
1141
assert(block->requests);
1142
assert(block->prev_changed && *block->prev_changed == block);
1143
assert(!block->next_used);
1144
assert(!block->prev_used);
1145
if (! --block->requests)
1148
if (block->hits_left)
1150
hot= !block->hits_left && at_end &&
1151
keycache->warm_blocks > keycache->min_warm_blocks;
1154
if (block->temperature == BLOCK_WARM)
1155
keycache->warm_blocks--;
1156
block->temperature= BLOCK_HOT;
1158
link_block(keycache, block, hot, (bool)at_end);
1159
block->last_hit_time= keycache->keycache_time;
1160
keycache->keycache_time++;
1162
At this place, the block might be in the LRU ring or not. If an
1163
evicter was waiting for a block, it was selected for eviction and
1164
not linked in the LRU ring.
1168
Check if we should link a hot block to the warm block sub-chain.
1169
It is possible that we select the same block as above. But it can
1170
also be another block. In any case a block from the LRU ring is
1171
selected. In other words it works even if the above block was
1172
selected for eviction and not linked in the LRU ring. Since this
1173
happens only if the LRU ring is empty, the block selected below
1174
would be NULL and the rest of the function skipped.
1176
block= keycache->used_ins;
1177
if (block && keycache->keycache_time - block->last_hit_time >
1178
keycache->age_threshold)
1180
unlink_block(keycache, block);
1181
link_block(keycache, block, 0, 0);
1182
if (block->temperature != BLOCK_WARM)
1184
keycache->warm_blocks++;
1185
block->temperature= BLOCK_WARM;
1192
Remove a reader of the page in block
1195
static void remove_reader(BLOCK_LINK *block)
1197
assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
1198
assert(block->hash_link && block->hash_link->block == block);
1199
assert(block->prev_changed && *block->prev_changed == block);
1200
assert(!block->next_used);
1201
assert(!block->prev_used);
1202
assert(block->hash_link->requests);
1203
if (! --block->hash_link->requests && block->condvar)
1204
keycache_pthread_cond_signal(block->condvar);
1209
Wait until the last reader of the page in block
1210
signals its termination
1213
static void wait_for_readers(KEY_CACHE *keycache,
1216
struct st_my_thread_var *thread= my_thread_var;
1217
assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
1218
assert(!(block->status & (BLOCK_ERROR | BLOCK_IN_FLUSH |
1220
assert(block->hash_link);
1221
assert(block->hash_link->block == block);
1222
/* Linked in file_blocks or changed_blocks hash. */
1223
assert(block->prev_changed && *block->prev_changed == block);
1224
/* Not linked in LRU ring. */
1225
assert(!block->next_used);
1226
assert(!block->prev_used);
1227
while (block->hash_link->requests)
1229
/* There must be no other waiter. We have no queue here. */
1230
assert(!block->condvar);
1231
block->condvar= &thread->suspend;
1232
keycache_pthread_cond_wait(&thread->suspend, &keycache->cache_lock);
1233
block->condvar= NULL;
280
1239
Add a hash link to a bucket in the hash_table
1253
Remove a hash link from the hash table
1256
static void unlink_hash(KEY_CACHE *keycache, HASH_LINK *hash_link)
1258
assert(hash_link->requests == 0);
1259
if ((*hash_link->prev= hash_link->next))
1260
hash_link->next->prev= hash_link->prev;
1261
hash_link->block= NULL;
1262
if (keycache->waiting_for_hash_link.last_thread)
1264
/* Signal that a free hash link has appeared */
1265
struct st_my_thread_var *last_thread=
1266
keycache->waiting_for_hash_link.last_thread;
1267
struct st_my_thread_var *first_thread= last_thread->next;
1268
struct st_my_thread_var *next_thread= first_thread;
1269
KEYCACHE_PAGE *first_page= (KEYCACHE_PAGE *) (first_thread->opt_info);
1270
struct st_my_thread_var *thread;
1272
hash_link->file= first_page->file;
1273
hash_link->diskpos= first_page->filepos;
1276
KEYCACHE_PAGE *page;
1277
thread= next_thread;
1278
page= (KEYCACHE_PAGE *) thread->opt_info;
1279
next_thread= thread->next;
1281
We notify about the event all threads that ask
1282
for the same page as the first thread in the queue
1284
if (page->file == hash_link->file && page->filepos == hash_link->diskpos)
1286
keycache_pthread_cond_signal(&thread->suspend);
1287
unlink_from_queue(&keycache->waiting_for_hash_link, thread);
1290
while (thread != last_thread);
1291
link_hash(&keycache->hash_root[KEYCACHE_HASH(hash_link->file,
1292
hash_link->diskpos)],
1296
hash_link->next= keycache->free_hash_list;
1297
keycache->free_hash_list= hash_link;
1302
Get the hash link for a page
1305
static HASH_LINK *get_hash_link(KEY_CACHE *keycache,
1306
int file, my_off_t filepos)
1308
register HASH_LINK *hash_link, **start;
1312
Find the bucket in the hash table for the pair (file, filepos);
1313
start contains the head of the bucket list,
1314
hash_link points to the first member of the list
1316
hash_link= *(start= &keycache->hash_root[KEYCACHE_HASH(file, filepos)]);
1317
/* Look for an element for the pair (file, filepos) in the bucket chain */
1319
(hash_link->diskpos != filepos || hash_link->file != file))
1321
hash_link= hash_link->next;
1325
/* There is no hash link in the hash table for the pair (file, filepos) */
1326
if (keycache->free_hash_list)
1328
hash_link= keycache->free_hash_list;
1329
keycache->free_hash_list= hash_link->next;
1331
else if (keycache->hash_links_used < keycache->hash_links)
1333
hash_link= &keycache->hash_link_root[keycache->hash_links_used++];
1337
/* Wait for a free hash link */
1338
struct st_my_thread_var *thread= my_thread_var;
1341
page.filepos= filepos;
1342
thread->opt_info= (void *) &page;
1343
link_into_queue(&keycache->waiting_for_hash_link, thread);
1344
keycache_pthread_cond_wait(&thread->suspend,
1345
&keycache->cache_lock);
1346
thread->opt_info= NULL;
1349
hash_link->file= file;
1350
hash_link->diskpos= filepos;
1351
link_hash(start, hash_link);
1353
/* Register the request for the page */
1354
hash_link->requests++;
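/*
  Usage sketch (this is how find_key_block() below consumes the result):
  get_hash_link() returns with one request registered on the link, and
  that request must be dropped again later, either via remove_reader()
  on the attached block or directly when the lookup is cancelled.

    hash_link= get_hash_link(keycache, file, filepos);
    assert((hash_link->file == file) && (hash_link->diskpos == filepos));
    ...
    hash_link->requests--;        /* or remove_reader(block) later on */
*/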
1361
Get a block for the file page requested by a keycache read/write operation;
1362
If the page is not in the cache return a free block, if there is none
1363
return the lru block after saving its buffer if the page is dirty.
1368
keycache pointer to a key cache data structure
1369
file handler for the file to read page from
1370
filepos position of the page in the file
1371
init_hits_left how initialize the block counter for the page
1372
wrmode <-> get for writing
1373
page_st out {PAGE_READ,PAGE_TO_BE_READ,PAGE_WAIT_TO_BE_READ}
1376
Pointer to the found block if successful, 0 - otherwise
1379
For the page from file positioned at filepos the function checks whether
1380
the page is in the key cache specified by the first parameter.
1381
If this is the case it immediately returns the block.
1382
If not, the function first chooses a block for this page. If there are
no unused blocks in the key cache yet, the function takes the block
1384
at the very beginning of the warm sub-chain. It saves the page in that
1385
block if it's dirty before returning the pointer to it.
1386
The function returns in the page_st parameter the following values:
1387
PAGE_READ - if page already in the block,
1388
PAGE_TO_BE_READ - if it is to be read yet by the current thread
1389
PAGE_WAIT_TO_BE_READ - if it is to be read by another thread
If an error occurs the BLOCK_ERROR bit is set in the block status.
It might happen that there are no blocks in the LRU chain (in the warm part) -
all blocks are unlinked for some read/write operations. Then the function
waits until the first of these operations links a block back.
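/*
  Caller sketch (condensed from key_cache_insert() at the end of this
  excerpt): the returned page_st value tells the caller whether the
  buffer already holds valid data or whether it still has to be read.

    block= find_key_block(keycache, file, filepos, level, 0, &page_st);
    if (block && !(block->status & BLOCK_ERROR))
    {
      if (page_st == PAGE_TO_BE_READ)
        read_block(keycache, block, keycache->key_cache_block_size,
                   read_length + offset, 1);
    }
*/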
1396
static BLOCK_LINK *find_key_block(KEY_CACHE *keycache,
1397
File file, my_off_t filepos,
1399
int wrmode, int *page_st)
1401
HASH_LINK *hash_link;
1408
If the flush phase of a resize operation fails, the cache is left
1409
unusable. This will be detected only after "goto restart".
1411
if (!keycache->can_be_used)
1415
Find the hash_link for the requested file block (file, filepos). We
1416
do always get a hash_link here. It has registered our request so
1417
that no other thread can use it for another file block until we
1418
release the request (which is done by remove_reader() usually). The
1419
hash_link can have a block assigned to it or not. If there is a
1420
block, it may be assigned to this hash_link or not. In cases where a
1421
block is evicted from the cache, it is taken from the LRU ring and
1422
referenced by the new hash_link. But the block can still be assigned
1423
to its old hash_link for some time if it needs to be flushed first,
1424
or if there are other threads still reading it.
1427
hash_link is always returned.
1428
hash_link->block can be:
1430
- not assigned to this hash_link or
1431
- assigned to this hash_link. If assigned, the block can have
1432
- invalid data (when freshly assigned) or
1433
- valid data. Valid data can be
1434
- changed over the file contents (dirty) or
1435
- not changed (clean).
1437
hash_link= get_hash_link(keycache, file, filepos);
1438
assert((hash_link->file == file) && (hash_link->diskpos == filepos));
1441
if ((block= hash_link->block) &&
1442
block->hash_link == hash_link && (block->status & BLOCK_READ))
1444
/* Assigned block with valid (changed or unchanged) contents. */
1445
page_status= PAGE_READ;
1448
else (page_status == -1)
1450
- block not assigned to this hash_link or
1451
- block assigned but not yet read from file (invalid data).
1454
if (keycache->in_resize)
1456
/* This is a request during a resize operation */
1460
struct st_my_thread_var *thread;
1463
The file block is not in the cache. We don't need it in the
1464
cache: we are going to read or write directly to file. Cancel
1465
the request. We can simply decrement hash_link->requests because
1466
we did not release cache_lock since increasing it. So no other
1467
thread can wait for our request to become released.
1469
if (hash_link->requests == 1)
1472
We are the only one to request this hash_link (this file/pos).
1475
hash_link->requests--;
1476
unlink_hash(keycache, hash_link);
1481
More requests on the hash_link. Someone tries to evict a block
1482
for this hash_link (could have started before resizing started).
1483
This means that the LRU ring is empty. Otherwise a block could
1484
be assigned immediately. Behave like a thread that wants to
1485
evict a block for this file/pos. Add to the queue of threads
1486
waiting for a block. Wait until there is one assigned.
1488
Refresh the request on the hash-link so that it cannot be reused
1489
for another file/pos.
1491
thread= my_thread_var;
1492
thread->opt_info= (void *) hash_link;
1493
link_into_queue(&keycache->waiting_for_block, thread);
1496
keycache_pthread_cond_wait(&thread->suspend,
1497
&keycache->cache_lock);
1498
} while (thread->next);
1499
thread->opt_info= NULL;
1501
A block should now be assigned to the hash_link. But it may
1502
still need to be evicted. Anyway, we should re-check the
1503
situation. page_status must be set correctly.
1505
hash_link->requests--;
1507
} /* end of if (!block) */
1510
There is a block for this file/pos in the cache. Register a
1511
request on it. This unlinks it from the LRU ring (if it is there)
1512
and hence protects it against eviction (if not already in
1513
eviction). We need this for returning the block to the caller, for
1514
calling remove_reader() (for debugging purposes), and for calling
1515
free_block(). The only case where we don't need the request is if
1516
the block is in eviction. In that case we have to unregister the
1519
reg_requests(keycache, block, 1);
1521
if (page_status != PAGE_READ)
1524
- block not assigned to this hash_link or
1525
- block assigned but not yet read from file (invalid data).
1527
This must be a block in eviction. It will be read soon. We need
1528
to wait here until this happened. Otherwise the caller could
1529
access a wrong block or a block which is in read. While waiting
1530
we cannot lose hash_link nor block. We have registered a request
1531
on the hash_link. Everything can happen to the block but changes
1532
in the hash_link -> block relationship. In other words:
1533
everything can happen to the block but free or another completed
1536
Note that we behave like a secondary requestor here. We just
1537
cannot return with PAGE_WAIT_TO_BE_READ. This would work for
1538
read requests and writes on dirty blocks that are not in flush
1539
only. Waiting here on COND_FOR_REQUESTED works in all
1542
assert(((block->hash_link != hash_link) &&
1543
(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH))) ||
1544
((block->hash_link == hash_link) &&
1545
!(block->status & BLOCK_READ)));
1546
wait_on_queue(&block->wqueue[COND_FOR_REQUESTED], &keycache->cache_lock);
1548
Here we can trust that the block has been assigned to this
1549
hash_link (block->hash_link == hash_link) and read into the
1550
buffer (BLOCK_READ). The worst things possible here are that the
1551
block is in free (BLOCK_REASSIGNED). But the block is still
1552
assigned to the hash_link. The freeing thread waits until we
1553
release our request on the hash_link. The block must not be
1554
again in eviction because we registered a request on it before
1557
assert(block->hash_link == hash_link);
1558
assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
1559
assert(!(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH)));
1562
The block is in the cache. Assigned to the hash_link. Valid data.
1563
Note that in case of page_st == PAGE_READ, the block can be marked
1564
for eviction. In any case it can be marked for freeing.
1569
/* A reader can just read the block. */
1570
*page_st= PAGE_READ;
1571
assert((hash_link->file == file) &&
1572
(hash_link->diskpos == filepos) &&
1573
(block->hash_link == hash_link));
1578
This is a writer. No two writers for the same block can exist.
1579
This must be assured by locks outside of the key cache.
1581
assert(!(block->status & BLOCK_FOR_UPDATE));
1583
while (block->status & BLOCK_IN_FLUSH)
1586
Wait until the block is flushed to file. Do not release the
1587
request on the hash_link yet to prevent that the block is freed
1588
or reassigned while we wait. While we wait, several things can
1589
happen to the block, including another flush. But the block
1590
cannot be reassigned to another hash_link until we release our
1591
request on it. But it can be marked BLOCK_REASSIGNED from free
1592
or eviction, while they wait for us to release the hash_link.
1594
wait_on_queue(&block->wqueue[COND_FOR_SAVED], &keycache->cache_lock);
1596
If the flush phase failed, the resize could have finished while
1599
if (!keycache->in_resize)
1601
remove_reader(block);
1602
unreg_request(keycache, block, 1);
1605
assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
1606
assert(!(block->status & BLOCK_FOR_UPDATE));
1607
assert(block->hash_link == hash_link);
1610
if (block->status & BLOCK_CHANGED)
1613
We want to write a block with changed contents. If the cache
1614
block size is bigger than the callers block size (e.g. MyISAM),
1615
the caller may replace part of the block only. Changes of the
1616
other part of the block must be preserved. Since the block has
1617
not yet been selected for flush, we can still add our changes.
1619
*page_st= PAGE_READ;
1620
assert((hash_link->file == file) &&
1621
(hash_link->diskpos == filepos) &&
1622
(block->hash_link == hash_link));
1627
This is a write request for a clean block. We do not want to have
1628
new dirty blocks in the cache while resizing. We will free the
1629
block and write directly to file. If the block is in eviction or
1630
in free, we just let it go.
1632
Unregister from the hash_link. This must be done before freeing
1633
the block. And it must be done if not freeing the block. Because
1634
we could have waited above, we need to call remove_reader(). Other
1635
threads could wait for us to release our request on the hash_link.
1637
remove_reader(block);
1639
/* If the block is not in eviction and not in free, we can free it. */
1640
if (!(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
1644
Free block as we are going to write directly to file.
1645
Although we have an exclusive lock for the updated key part,
1646
the control can be yielded by the current thread as we might
1647
have unfinished readers of other key parts in the block
1648
buffer. Still we are guaranteed not to have any readers
1649
of the key part we are writing into until the block is
1650
removed from the cache as we set the BLOCK_REASSIGNED
1651
flag (see the code below that handles reading requests).
1653
free_block(keycache, block);
1658
The block will be evicted/freed soon. Don't touch it in any way.
1659
Unregister the request that we registered above.
1661
unreg_request(keycache, block, 1);
1664
The block is still assigned to the hash_link (the file/pos that
1665
we are going to write to). Wait until the eviction/free is
1666
complete. Otherwise the direct write could complete before all
1667
readers are done with the block. So they could read outdated
1670
Since we released our request on the hash_link, it can be reused
1671
for another file/pos. Hence we cannot just check for
1672
block->hash_link == hash_link. As long as the resize is
1673
proceeding the block cannot be reassigned to the same file/pos
1674
again. So we can terminate the loop when the block is no longer
1675
assigned to this file/pos.
1679
wait_on_queue(&block->wqueue[COND_FOR_SAVED],
1680
&keycache->cache_lock);
1682
If the flush phase failed, the resize could have finished
1683
while we waited here.
1685
if (!keycache->in_resize)
1687
} while (block->hash_link &&
1688
(block->hash_link->file == file) &&
1689
(block->hash_link->diskpos == filepos));
1694
if (page_status == PAGE_READ &&
1695
(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
1699
This is a request for a block to be removed from cache. The block
1700
is assigned to this hash_link and contains valid data, but is
1701
marked for eviction or to be freed. Possible reasons why it has
1702
not yet been evicted/freed can be a flush before reassignment
1703
(BLOCK_IN_SWITCH), readers of the block have not finished yet
1704
(BLOCK_REASSIGNED), or the evicting thread did not yet awake after
1705
the block has been selected for it (BLOCK_IN_EVICTION).
1707
Only reading requests can proceed until the old dirty page is flushed,
1708
all others are to be suspended, then resubmitted
1710
if (!wrmode && !(block->status & BLOCK_REASSIGNED))
1713
This is a read request and the block not yet reassigned. We can
1714
register our request and proceed. This unlinks the block from
1715
the LRU ring and protects it against eviction.
1717
reg_requests(keycache, block, 1);
1722
Either this is a write request for a block that is in eviction
1723
or in free. We must not use it any more. Instead we must evict
1724
another block. But we cannot do this before the eviction/free is
1725
done. Otherwise we would find the same hash_link + block again
1728
Or this is a read request for a block in eviction/free that does
1729
not require a flush, but waits for readers to finish with the
1730
block. We do not read this block to let the eviction/free happen
1731
as soon as possible. Again we must wait so that we don't find
1732
the same hash_link + block again and again.
1734
assert(hash_link->requests);
1735
hash_link->requests--;
1736
wait_on_queue(&block->wqueue[COND_FOR_SAVED], &keycache->cache_lock);
1738
The block is no longer assigned to this hash_link.
1747
This is a request for a new block or for a block not to be removed.
1750
- block not assigned to this hash_link or
1751
- block assigned but not yet read from file,
1753
- block assigned with valid (changed or unchanged) data and
1754
- it will not be reassigned/freed.
1758
/* No block is assigned to the hash_link yet. */
1759
if (keycache->blocks_unused)
1761
if (keycache->free_block_list)
1763
/* There is a block in the free list. */
1764
block= keycache->free_block_list;
1765
keycache->free_block_list= block->next_used;
1766
block->next_used= NULL;
1770
/* There are some never-used blocks, take the first of them */
1771
assert(keycache->blocks_used <
1772
(uint32_t) keycache->disk_blocks);
1773
block= &keycache->block_root[keycache->blocks_used];
1774
block->buffer= ADD_TO_PTR(keycache->block_mem,
1775
((uint32_t) keycache->blocks_used*
1776
keycache->key_cache_block_size),
1778
keycache->blocks_used++;
1779
assert(!block->next_used);
1781
assert(!block->prev_used);
1782
assert(!block->next_changed);
1783
assert(!block->prev_changed);
1784
assert(!block->hash_link);
1785
assert(!block->status);
1786
assert(!block->requests);
1787
keycache->blocks_unused--;
1788
block->status= BLOCK_IN_USE;
1790
block->offset= keycache->key_cache_block_size;
1792
block->temperature= BLOCK_COLD;
1793
block->hits_left= init_hits_left;
1794
block->last_hit_time= 0;
1795
block->hash_link= hash_link;
1796
hash_link->block= block;
1797
link_to_file_list(keycache, block, file, 0);
1798
page_status= PAGE_TO_BE_READ;
1803
There are no free blocks and no never used blocks, use a block
1807
if (! keycache->used_last)
1810
The LRU ring is empty. Wait until a new block is added to
1811
it. Several threads might wait here for the same hash_link,
1812
all of them must get the same block. While waiting for a
1813
block, after a block is selected for this hash_link, other
1814
threads can run first before this one awakes. During this
1815
time interval other threads find this hash_link pointing to
1816
the block, which is still assigned to another hash_link. In
1817
this case the block is not marked BLOCK_IN_SWITCH yet, but
1818
it is marked BLOCK_IN_EVICTION.
1821
struct st_my_thread_var *thread= my_thread_var;
1822
thread->opt_info= (void *) hash_link;
1823
link_into_queue(&keycache->waiting_for_block, thread);
1826
keycache_pthread_cond_wait(&thread->suspend,
1827
&keycache->cache_lock);
1829
while (thread->next);
1830
thread->opt_info= NULL;
1831
/* Assert that block has a request registered. */
1832
assert(hash_link->block->requests);
1833
/* Assert that block is not in LRU ring. */
1834
assert(!hash_link->block->next_used);
1835
assert(!hash_link->block->prev_used);
1838
If we waited above, hash_link->block has been assigned by
1839
link_block(). Otherwise it is still NULL. In the latter case
1840
we need to grab a block from the LRU ring ourselves.
1842
block= hash_link->block;
1845
/* Select the last block from the LRU ring. */
1846
block= keycache->used_last->next_used;
1847
block->hits_left= init_hits_left;
1848
block->last_hit_time= 0;
1849
hash_link->block= block;
1851
Register a request on the block. This unlinks it from the
1852
LRU ring and protects it against eviction.
1854
assert(!block->requests);
1855
reg_requests(keycache, block,1);
1857
We do not need to set block->status|= BLOCK_IN_EVICTION here
1858
because we will set block->status|= BLOCK_IN_SWITCH
1859
immediately without releasing the lock in between. This does
1860
also support debugging. When looking at the block, one can
1861
see if the block has been selected by link_block() after the
1862
LRU ring was empty, or if it was grabbed directly from the
1863
LRU ring in this branch.
1868
If we had to wait above, there is a small chance that another
1869
thread grabbed this block for the same file block already. But
1870
in most cases the first condition is true.
1872
if (block->hash_link != hash_link &&
1873
! (block->status & BLOCK_IN_SWITCH) )
1875
/* this is a primary request for a new page */
1876
block->status|= BLOCK_IN_SWITCH;
1878
if (block->status & BLOCK_CHANGED)
1880
/* The block contains a dirty page - push it out of the cache */
1882
if (block->status & BLOCK_IN_FLUSH)
1885
The block is marked for flush. If we do not wait here,
1886
it could happen that we write the block, reassign it to
1887
another file block, then, before the new owner can read
1888
the new file block, the flusher writes the cache block
1889
(which still has the old contents) to the new file block!
1891
wait_on_queue(&block->wqueue[COND_FOR_SAVED],
1892
&keycache->cache_lock);
1894
The block is marked BLOCK_IN_SWITCH. It should be left
1895
alone except for reading. No free, no write.
1897
assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
1898
assert(!(block->status & (BLOCK_REASSIGNED |
1900
BLOCK_FOR_UPDATE)));
1904
block->status|= BLOCK_IN_FLUSH | BLOCK_IN_FLUSHWRITE;
1906
BLOCK_IN_EVICTION may be true or not. Other flags must
1909
assert((block->status & ~BLOCK_IN_EVICTION) ==
1910
(BLOCK_READ | BLOCK_IN_SWITCH |
1911
BLOCK_IN_FLUSH | BLOCK_IN_FLUSHWRITE |
1912
BLOCK_CHANGED | BLOCK_IN_USE));
1913
assert(block->hash_link);
1915
keycache_pthread_mutex_unlock(&keycache->cache_lock);
1917
The call is thread safe because only the current
1918
thread might change the block->hash_link value
1920
error= (pwrite(block->hash_link->file,
1921
block->buffer+block->offset,
1922
block->length - block->offset,
1923
block->hash_link->diskpos+ block->offset) == 0);
1924
keycache_pthread_mutex_lock(&keycache->cache_lock);
1926
/* Block status must not have changed. */
1927
assert((block->status & ~BLOCK_IN_EVICTION) ==
1928
(BLOCK_READ | BLOCK_IN_SWITCH |
1929
BLOCK_IN_FLUSH | BLOCK_IN_FLUSHWRITE |
1930
BLOCK_CHANGED | BLOCK_IN_USE));
1931
keycache->global_cache_write++;
1935
block->status|= BLOCK_REASSIGNED;
1937
The block comes from the LRU ring. It must have a hash_link
1940
assert(block->hash_link);
1941
if (block->hash_link)
1944
All pending requests for this page must be resubmitted.
1945
This must be done before waiting for readers. They could
1946
wait for the flush to complete. And we must also do it
1947
after the wait. Flushers might try to free the block while
1948
we wait. They would wait until the reassignment is
1949
complete. Also the block status must reflect the correct
1950
situation: The block is not changed nor in flush any more.
1951
Note that we must not change the BLOCK_CHANGED flag
1952
outside of link_to_file_list() so that it is always in the
1953
correct queue and the *blocks_changed counters are
1956
block->status&= ~(BLOCK_IN_FLUSH | BLOCK_IN_FLUSHWRITE);
1957
link_to_file_list(keycache, block, block->hash_link->file, 1);
1958
release_whole_queue(&block->wqueue[COND_FOR_SAVED]);
1960
The block is still assigned to its old hash_link.
1961
Wait until all pending read requests
1962
for this page are executed
1963
(we could have avoided this waiting, if we had read
1964
a page in the cache in a sweep, without yielding control)
1966
wait_for_readers(keycache, block);
1967
assert(block->hash_link && block->hash_link->block == block &&
1968
block->prev_changed);
1969
/* The reader must not have been a writer. */
1970
assert(!(block->status & BLOCK_CHANGED));
1972
/* Wake flushers that might have found the block in between. */
1973
release_whole_queue(&block->wqueue[COND_FOR_SAVED]);
1975
/* Remove the hash link for the old file block from the hash. */
1976
unlink_hash(keycache, block->hash_link);
1979
For sanity checks link_to_file_list() asserts that block
1980
and hash_link refer to each other. Hence we need to assign
1981
the hash_link first, but then we would not know if it was
1982
linked before. Hence we would not know if to unlink it. So
1983
unlink it here and call link_to_file_list(..., false).
1985
unlink_changed(block);
1987
block->status= error ? BLOCK_ERROR : BLOCK_IN_USE ;
1989
block->offset= keycache->key_cache_block_size;
1990
block->hash_link= hash_link;
1991
link_to_file_list(keycache, block, file, 0);
1992
page_status= PAGE_TO_BE_READ;
1994
assert(block->hash_link->block == block);
1995
assert(hash_link->block->hash_link == hash_link);
2000
Either (block->hash_link == hash_link),
2001
or (block->status & BLOCK_IN_SWITCH).
2003
This is for secondary requests for a new file block only.
2004
Either it is already assigned to the new hash_link meanwhile
2005
(if we had to wait due to empty LRU), or it is already in
2006
eviction by another thread. Since this block has been
2007
grabbed from the LRU ring and attached to this hash_link,
2008
another thread cannot grab the same block from the LRU ring
2009
anymore. If the block is in eviction already, it must become
2010
attached to the same hash_link and as such destined for the
2013
page_status= (((block->hash_link == hash_link) &&
2014
(block->status & BLOCK_READ)) ?
2015
PAGE_READ : PAGE_WAIT_TO_BE_READ);
2022
Block is not NULL. This hash_link points to a block.
2024
- block not assigned to this hash_link (yet) or
2025
- block assigned but not yet read from file,
2027
- block assigned with valid (changed or unchanged) data and
2028
- it will not be reassigned/freed.
2030
The first condition means hash_link points to a block in
2031
eviction. This is not necessarily marked by BLOCK_IN_SWITCH yet.
2032
But then it is marked BLOCK_IN_EVICTION. See the NOTE in
2033
link_block(). In both cases it is destined for this hash_link
2034
and its file block address. When this hash_link got its block
2035
address, the block was removed from the LRU ring and cannot be
2036
selected for eviction (for another hash_link) again.
2038
Register a request on the block. This is another protection
2041
assert(((block->hash_link != hash_link) &&
2042
(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH))) ||
2043
((block->hash_link == hash_link) &&
2044
!(block->status & BLOCK_READ)) ||
2045
((block->status & BLOCK_READ) &&
2046
!(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH))));
2047
reg_requests(keycache, block, 1);
2048
page_status= (((block->hash_link == hash_link) &&
2049
(block->status & BLOCK_READ)) ?
2050
PAGE_READ : PAGE_WAIT_TO_BE_READ);
2054
assert(page_status != -1);
2055
/* Same assert basically, but be very sure. */
2057
/* Assert that block has a request and is not in LRU ring. */
2058
assert(block->requests);
2059
assert(!block->next_used);
2060
assert(!block->prev_used);
2061
/* Assert that we return the correct block. */
2062
assert((page_status == PAGE_WAIT_TO_BE_READ) ||
2063
((block->hash_link->file == file) &&
2064
(block->hash_link->diskpos == filepos)));
2065
*page_st=page_status;
2072
Read into a key cache block buffer from disk.
2077
keycache pointer to a key cache data structure
2078
block block to which buffer the data is to be read
2079
read_length size of data to be read
2080
min_length at least so much data must be read
2081
primary <-> the current thread will read the data
2087
The function either reads a page data from file to the block buffer,
2088
or waits until another thread reads it. What page to read is determined
2089
by a block parameter - reference to a hash link for this page.
2090
If an error occurs the BLOCK_ERROR bit is set in the block status.
2091
We do not report error when the size of successfully read
2092
portion is less than read_length, but not less than min_length.
2095
static void read_block(KEY_CACHE *keycache,
2096
BLOCK_LINK *block, uint32_t read_length,
2097
uint32_t min_length, bool primary)
2099
uint32_t got_length;
2101
/* On entry cache_lock is locked */
2106
This code is executed only by threads that submitted primary
2107
requests. Until block->status contains BLOCK_READ, all other
2108
request for the block become secondary requests. For a primary
2109
request the block must be properly initialized.
2111
assert(((block->status & ~BLOCK_FOR_UPDATE) == BLOCK_IN_USE));
2112
assert((block->length == 0));
2113
assert((block->offset == keycache->key_cache_block_size));
2114
assert((block->requests > 0));
2116
keycache->global_cache_read++;
2117
/* Page is not in buffer yet, is to be read from disk */
2118
keycache_pthread_mutex_unlock(&keycache->cache_lock);
2120
Here other threads may step in and register as secondary readers.
2121
They will register in block->wqueue[COND_FOR_REQUESTED].
2123
got_length= pread(block->hash_link->file, block->buffer, read_length, block->hash_link->diskpos);
2124
keycache_pthread_mutex_lock(&keycache->cache_lock);
2126
The block can now have been marked for free (in case of
2127
FLUSH_RELEASE). Otherwise the state must be unchanged.
2129
assert(((block->status & ~(BLOCK_REASSIGNED |
2130
BLOCK_FOR_UPDATE)) == BLOCK_IN_USE));
2131
assert((block->length == 0));
2132
assert((block->offset == keycache->key_cache_block_size));
2133
assert((block->requests > 0));
2135
if (got_length < min_length)
2136
block->status|= BLOCK_ERROR;
2139
block->status|= BLOCK_READ;
2140
block->length= got_length;
2142
Do not set block->offset here. If this block is marked
2143
BLOCK_CHANGED later, we want to flush only the modified part. So
2144
only a writer may set block->offset down from
2145
keycache->key_cache_block_size.
2148
/* Signal that all pending requests for this page now can be processed */
2149
release_whole_queue(&block->wqueue[COND_FOR_REQUESTED]);
2154
This code is executed only by threads that submitted secondary
2155
requests. At this point it could happen that the cache block is
2156
not yet assigned to the hash_link for the requested file block.
2157
But at awake from the wait this should be the case. Unfortunately
2158
we cannot assert this here because we do not know the hash_link
2159
for the requested file block nor the file and position. So we have
2160
to assert this in the caller.
2162
wait_on_queue(&block->wqueue[COND_FOR_REQUESTED], &keycache->cache_lock);
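/*
  Caller sketch (matching the call in key_cache_insert() below): the
  primary requestor reads the whole cache block itself, while secondary
  requestors pass primary == false and merely wait for it to finish.

    read_block(keycache, block, keycache->key_cache_block_size,
               read_length + offset, (page_st == PAGE_TO_BE_READ));
*/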
294
2168
Read a block of data from a cached file into a buffer;
361
2387
int key_cache_insert(KEY_CACHE *keycache,
2388
File file, my_off_t filepos, int level,
363
2389
unsigned char *buff, uint32_t length)
2393
if (keycache->key_cache_inited)
2395
/* Key cache is used */
2396
register BLOCK_LINK *block;
2397
uint32_t read_length;
2400
bool locked_and_incremented= false;
2403
When the keycache is once initialized, we use the cache_lock to
2404
reliably distinguish the cases of normal operation, resizing, and
2405
disabled cache. We always increment and decrement
2406
'cnt_for_resize_op' so that a resizer can wait for pending I/O.
2408
keycache_pthread_mutex_lock(&keycache->cache_lock);
2410
We do not load index data into a disabled cache nor into an
2413
if (!keycache->can_be_used || keycache->in_resize)
2415
/* Register the pseudo I/O for the next resize. */
2416
inc_counter_for_resize_op(keycache);
2417
locked_and_incremented= true;
2418
/* Loaded data may not always be aligned to cache blocks. */
2419
offset= (uint) (filepos % keycache->key_cache_block_size);
2420
/* Load data in key_cache_block_size increments. */
2423
/* Cache could be disabled or resizing in a later iteration. */
2424
if (!keycache->can_be_used || keycache->in_resize)
2426
/* Start loading at the beginning of the cache block. */
2428
/* Do not load beyond the end of the cache block. */
2429
read_length= length;
2430
set_if_smaller(read_length, keycache->key_cache_block_size-offset);
2431
assert(read_length > 0);
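/*
  A minimal sketch (hypothetical names, no cache involved) of the chunking
  arithmetic used here: an arbitrary (filepos, length) request is split into
  pieces that never cross a cache-block boundary.
*/
#include <stdint.h>

static void demo_for_each_chunk(uint64_t filepos, uint32_t length,
                                uint32_t block_size,
                                void (*cb)(uint64_t pos, uint32_t len))
{
  while (length) {
    uint32_t offset= (uint32_t) (filepos % block_size); /* start inside block */
    uint32_t chunk= block_size - offset;                /* room left in block */
    if (chunk > length)
      chunk= length;
    cb(filepos, chunk);
    filepos+= chunk;
    length-= chunk;
  }
}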
2433
/* The block has been read by the caller already. */
2434
keycache->global_cache_read++;
2435
/* Request the cache block that matches file/pos. */
2436
keycache->global_cache_r_requests++;
2437
block= find_key_block(keycache, file, filepos, level, 0, &page_st);
2441
This happens only for requests submitted during key cache
2442
resize. The block is not in the cache and shall not go in.
2443
Stop loading index data.
2447
if (!(block->status & BLOCK_ERROR))
2449
if ((page_st == PAGE_WAIT_TO_BE_READ) ||
2450
((page_st == PAGE_TO_BE_READ) &&
2451
(offset || (read_length < keycache->key_cache_block_size))))
2456
this is a secondary request for a block to be read into the
2457
cache. The block is in eviction. It is not yet assigned to
2458
the requested file block (It does not point to the right
2459
hash_link). So we cannot call remove_reader() on the block.
2460
And we cannot access the hash_link directly here. We need to
2461
wait until the assignment is complete. read_block() executes
2462
the correct wait when called with primary == false.
2466
this is a primary request for a block to be read into the
2467
cache and the supplied data does not fill the whole block.
2469
This function is called on behalf of a LOAD INDEX INTO CACHE
2470
statement, which is a read-only task and allows other
2471
readers. It is possible that a parallel running reader tries
2472
to access this block. If it needs more data than has been
2473
supplied here, it would report an error. To be sure that we
2474
have all data in the block that is available in the file, we
2475
read the block ourselves.
2477
Though re-reading what the caller has already read is an
2478
expensive operation, we need to do this for correctness.
2480
read_block(keycache, block, keycache->key_cache_block_size,
2481
read_length + offset, (page_st == PAGE_TO_BE_READ));
2483
A secondary request must now have the block assigned to the
2484
requested file block. It does not hurt to check it for
2485
primary requests too.
2487
assert(keycache->can_be_used);
2488
assert(block->hash_link->file == file);
2489
assert(block->hash_link->diskpos == filepos);
2490
assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
2492
else if (page_st == PAGE_TO_BE_READ)
2495
This is a new block in the cache. If we come here, we have
2496
data for the whole block.
2498
assert(block->hash_link->requests);
2499
assert(block->status & BLOCK_IN_USE);
2500
assert((page_st == PAGE_TO_BE_READ) ||
2501
(block->status & BLOCK_READ));
2503
#if !defined(SERIALIZED_READ_FROM_CACHE)
2504
keycache_pthread_mutex_unlock(&keycache->cache_lock);
2506
Here other threads may step in and register as secondary readers.
2507
They will register in block->wqueue[COND_FOR_REQUESTED].
2511
/* Copy data from buff */
2512
memcpy(block->buffer+offset, buff, (size_t) read_length);
2514
#if !defined(SERIALIZED_READ_FROM_CACHE)
2515
keycache_pthread_mutex_lock(&keycache->cache_lock);
2516
assert(block->status & BLOCK_IN_USE);
2517
assert((page_st == PAGE_TO_BE_READ) ||
2518
(block->status & BLOCK_READ));
2521
After the data is in the buffer, we can declare the block
2522
valid. Now other threads do not need to register as
2523
secondary readers any more. They can immediately access the
2526
block->status|= BLOCK_READ;
2527
block->length= read_length+offset;
2529
Do not set block->offset here. If this block is marked
2530
BLOCK_CHANGED later, we want to flush only the modified part. So
2531
only a writer may set block->offset down from
2532
keycache->key_cache_block_size.
2534
/* Signal all pending requests. */
2535
release_whole_queue(&block->wqueue[COND_FOR_REQUESTED]);
2540
page_st == PAGE_READ. The block is in the buffer. All data
2541
must already be present. Blocks are always read with all
2542
data available on file. Assert that the block does not have
2543
less contents than the preloader supplies. If the caller has
2544
data beyond block->length, it means that a file write has
2545
been done while this block was in cache and not extended
2546
with the new data. If the condition is met, we can simply
2549
assert((page_st == PAGE_READ) &&
2550
(read_length + offset <= block->length));
2554
A secondary request must now have the block assigned to the
2555
requested file block. It does not hurt to check it for primary
2558
assert(block->hash_link->file == file);
2559
assert(block->hash_link->diskpos == filepos);
2560
assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
2561
} /* end of if (!(block->status & BLOCK_ERROR)) */
2564
remove_reader(block);
2567
Link the block into the LRU ring if it's the last submitted
2568
request for the block. This enables eviction for the block.
2570
unreg_request(keycache, block, 1);
2572
error= (block->status & BLOCK_ERROR);
2578
filepos+= read_length+offset;
2581
} while ((length-= read_length));
2584
if (locked_and_incremented)
2585
dec_counter_for_resize_op(keycache);
2586
keycache_pthread_mutex_unlock(&keycache->cache_lock);
405
2621
int key_cache_write(KEY_CACHE *keycache,
406
int file, internal::my_off_t filepos, int level,
2622
File file, my_off_t filepos, int level,
407
2623
unsigned char *buff, uint32_t length,
408
uint32_t block_length,
2624
uint32_t block_length __attribute__((unused)),
2627
bool locked_and_incremented= false;
415
2630
if (!dont_write)
2632
/* purecov: begin inspected */
417
2633
/* Not used in the server. */
418
2634
/* Force writing from buff into disk. */
2635
keycache->global_cache_w_requests++;
2636
keycache->global_cache_write++;
419
2637
if (pwrite(file, buff, length, filepos) == 0)
423
assert (!keycache->key_cache_inited);
2642
if (keycache->key_cache_inited)
2644
/* Key cache is used */
2645
register BLOCK_LINK *block;
2646
uint32_t read_length;
2651
Once the key cache is initialized, we use the cache_lock to
2652
reliably distinguish the cases of normal operation, resizing, and
2653
disabled cache. We always increment and decrement
2654
'cnt_for_resize_op' so that a resizer can wait for pending I/O.
2656
keycache_pthread_mutex_lock(&keycache->cache_lock);
2658
Cache resizing has two phases: Flushing and re-initializing. In
2659
the flush phase write requests can modify dirty blocks that are
2660
not yet in flush. Otherwise they are allowed to bypass the cache.
2661
find_key_block() returns NULL in both cases (clean blocks and
2664
After the flush phase new I/O requests must wait until the
2665
re-initialization is done. The re-initialization can be done only
2666
if no I/O request is in progress. The reason is that
2667
key_cache_block_size can change. With enabled cache I/O is done in
2668
chunks of key_cache_block_size. Every chunk tries to use a cache
2669
block first. If the block size changes in the middle, a block
2670
could be missed and data could be written below a cached block.
2672
while (keycache->in_resize && !keycache->resize_in_flush)
2673
wait_on_queue(&keycache->resize_queue, &keycache->cache_lock);
2674
/* Register the I/O for the next resize. */
2675
inc_counter_for_resize_op(keycache);
2676
locked_and_incremented= true;
2677
/* Requested data may not always be aligned to cache blocks. */
2678
offset= (uint) (filepos % keycache->key_cache_block_size);
2679
/* Write data in key_cache_block_size increments. */
2682
/* Cache could be disabled in a later iteration. */
2683
if (!keycache->can_be_used)
2685
/* Start writing at the beginning of the cache block. */
2687
/* Do not write beyond the end of the cache block. */
2688
read_length= length;
2689
set_if_smaller(read_length, keycache->key_cache_block_size-offset);
2690
assert(read_length > 0);
2692
/* Request the cache block that matches file/pos. */
2693
keycache->global_cache_w_requests++;
2694
block= find_key_block(keycache, file, filepos, level, 1, &page_st);
2698
This happens only for requests submitted during key cache
2699
resize. The block is not in the cache and shall not go in.
2700
Write directly to file.
2704
/* Used in the server. */
2705
keycache->global_cache_write++;
2706
keycache_pthread_mutex_unlock(&keycache->cache_lock);
2707
if (pwrite(file, (unsigned char*) buff, read_length, filepos + offset) == 0)
2709
keycache_pthread_mutex_lock(&keycache->cache_lock);
2714
Prevent block from flushing and from being selected to be
2715
freed. This must be set when we release the cache_lock.
2716
However, we must not set the status of the block before it is
2717
assigned to this file/pos.
2719
if (page_st != PAGE_WAIT_TO_BE_READ)
2720
block->status|= BLOCK_FOR_UPDATE;
2722
We must read the file block first if it is not yet in the cache
2723
and we do not replace all of its contents.
2725
In cases where the cache block is big enough to contain (parts
2726
of) index blocks of different indexes, our request can be
2727
secondary (PAGE_WAIT_TO_BE_READ). In this case another thread is
2728
reading the file block. If the read completes after us, it
2729
overwrites our new contents with the old contents. So we have to
2730
wait for the other thread to complete the read of this block.
2731
read_block() takes care of the wait.
2733
if (!(block->status & BLOCK_ERROR) &&
2734
((page_st == PAGE_TO_BE_READ &&
2735
(offset || read_length < keycache->key_cache_block_size)) ||
2736
(page_st == PAGE_WAIT_TO_BE_READ)))
2738
read_block(keycache, block,
2739
offset + read_length >= keycache->key_cache_block_size?
2740
offset : keycache->key_cache_block_size,
2741
offset, (page_st == PAGE_TO_BE_READ));
2742
assert(keycache->can_be_used);
2743
assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
2745
Prevent block from flushing and from being selected to be
2746
freed. This must be set when we release the cache_lock.
2747
Here we set it in case we could not set it above.
2749
block->status|= BLOCK_FOR_UPDATE;
2752
The block should always be assigned to the requested file block
2753
here. It need not be BLOCK_READ when overwriting the whole block.
2755
assert(block->hash_link->file == file);
2756
assert(block->hash_link->diskpos == filepos);
2757
assert(block->status & BLOCK_IN_USE);
2758
assert((page_st == PAGE_TO_BE_READ) || (block->status & BLOCK_READ));
2760
The block to be written must not be marked BLOCK_REASSIGNED.
2761
Otherwise it could be freed in dirty state or reused without
2762
another flush during eviction. It must also not be in flush.
2763
Otherwise the old contents may have been flushed already and
2764
the flusher could clear BLOCK_CHANGED without flushing the
2767
assert(!(block->status & BLOCK_REASSIGNED));
2769
while (block->status & BLOCK_IN_FLUSHWRITE)
2772
Another thread is flushing the block. It was dirty already.
2773
Wait until the block is flushed to file. Otherwise we could
2774
modify the buffer contents just while it is written to file.
2775
An unpredictable file block contents would be the result.
2776
While we wait, several things can happen to the block,
2777
including another flush. But the block cannot be reassigned to
2778
another hash_link until we release our request on it.
2780
wait_on_queue(&block->wqueue[COND_FOR_SAVED], &keycache->cache_lock);
2781
assert(keycache->can_be_used);
2782
assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
2783
/* Still must not be marked for free. */
2784
assert(!(block->status & BLOCK_REASSIGNED));
2785
assert(block->hash_link && (block->hash_link->block == block));
2789
We could perhaps release the cache_lock during access of the
2790
data like in the other functions. Locks outside of the key cache
2791
assure that readers and a writer do not access the same range of
2792
data. Parallel accesses should happen only if the cache block
2793
contains multiple index blocks (or fragments of them). So different parts of
2794
the buffer would be read/written. An attempt to flush during
2795
memcpy() is prevented with BLOCK_FOR_UPDATE.
2797
if (!(block->status & BLOCK_ERROR))
2799
#if !defined(SERIALIZED_READ_FROM_CACHE)
2800
keycache_pthread_mutex_unlock(&keycache->cache_lock);
2802
memcpy(block->buffer+offset, buff, (size_t) read_length);
2804
#if !defined(SERIALIZED_READ_FROM_CACHE)
2805
keycache_pthread_mutex_lock(&keycache->cache_lock);
2811
/* Not used in the server. buff has been written to disk at start. */
2812
if ((block->status & BLOCK_CHANGED) &&
2813
(!offset && read_length >= keycache->key_cache_block_size))
2814
link_to_file_list(keycache, block, block->hash_link->file, 1);
2816
else if (! (block->status & BLOCK_CHANGED))
2817
link_to_changed_list(keycache, block);
2818
block->status|=BLOCK_READ;
2820
Allow block to be selected to be freed. Since it is marked
2821
BLOCK_CHANGED too, it won't be selected to be freed without
2824
block->status&= ~BLOCK_FOR_UPDATE;
2825
set_if_smaller(block->offset, offset);
2826
set_if_bigger(block->length, read_length+offset);
2828
/* Threads may be waiting for the changes to be complete. */
2829
release_whole_queue(&block->wqueue[COND_FOR_REQUESTED]);
2832
If only a part of the cache block is to be replaced, and the
2833
rest has been read from file, then the cache lock has been
2834
released for I/O and it could be possible that another thread
2835
wants to evict or free the block and waits for it to be
2836
released. So we must not just decrement hash_link->requests, but
2837
also wake a waiting thread.
2839
remove_reader(block);
2842
Link the block into the LRU ring if it's the last submitted
2843
request for the block. This enables eviction for the block.
2845
unreg_request(keycache, block, 1);
2847
if (block->status & BLOCK_ERROR)
2855
filepos+= read_length+offset;
2858
} while ((length-= read_length));
425
2863
/* Key cache is not used */
428
2866
/* Used in the server. */
2867
keycache->global_cache_w_requests++;
2868
keycache->global_cache_write++;
2869
if (locked_and_incremented)
2870
keycache_pthread_mutex_unlock(&keycache->cache_lock);
429
2871
if (pwrite(file, (unsigned char*) buff, length, filepos) == 0)
2873
if (locked_and_incremented)
2874
keycache_pthread_mutex_lock(&keycache->cache_lock);
2878
if (locked_and_incremented)
2880
dec_counter_for_resize_op(keycache);
2881
keycache_pthread_mutex_unlock(&keycache->cache_lock);
2892
keycache Pointer to a key cache data structure
2893
block Pointer to the block to free
2896
Remove reference to block from hash table.
2897
Remove block from the chain of clean blocks.
2898
Add block to the free list (see the sketch after this description).
2901
Block must not be free (status == 0).
2902
Block must not be in free_block_list.
2903
Block must not be in the LRU ring.
2904
Block must not be in eviction (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH).
2905
Block must not be in free (BLOCK_REASSIGNED).
2906
Block must not be in flush (BLOCK_IN_FLUSH).
2907
Block must not be dirty (BLOCK_CHANGED).
2908
Block must not be in changed_blocks (dirty) hash.
2909
Block must be in file_blocks (clean) hash.
2910
Block must refer to a hash_link.
2911
Block must have a request registered on it.
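/*
  A minimal sketch (hypothetical simplified structs) of the free-list
  insertion mentioned in the description above: the free list is singly
  linked through the next_used pointer and blocks_unused counts its entries.
*/
struct demo_block_link {
  struct demo_block_link *next_used;
};

struct demo_cache {
  struct demo_block_link *free_block_list;
  unsigned blocks_unused;
};

static void demo_push_free(struct demo_cache *c, struct demo_block_link *b)
{
  b->next_used= c->free_block_list;     /* link in front of the list */
  c->free_block_list= b;
  c->blocks_unused++;
}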
2914
static void free_block(KEY_CACHE *keycache, BLOCK_LINK *block)
2917
Assert that the block is not free already. And that it is in a clean
2918
state. Note that the block might just be assigned to a hash_link and
2919
not yet read (BLOCK_READ may not be set here). In this case a reader
2920
is registered in the hash_link and free_block() will wait for it
2923
assert((block->status & BLOCK_IN_USE) &&
2924
!(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
2925
BLOCK_REASSIGNED | BLOCK_IN_FLUSH |
2926
BLOCK_CHANGED | BLOCK_FOR_UPDATE)));
2927
/* Assert that the block is in a file_blocks chain. */
2928
assert(block->prev_changed && *block->prev_changed == block);
2929
/* Assert that the block is not in the LRU ring. */
2930
assert(!block->next_used && !block->prev_used);
2932
IMHO the below condition (if()) makes no sense. I can't see how it
2933
could be possible that free_block() is entered with a NULL hash_link
2934
pointer. The only place where it can become NULL is in free_block()
2935
(or before its first use ever, but for those blocks free_block() is
2936
not called). I don't remove the conditional as it cannot harm, but
2937
place an assert to confirm my hypothesis. Eventually the
2938
condition (if()) can be removed.
2940
assert(block->hash_link && block->hash_link->block == block);
2941
if (block->hash_link)
2944
While waiting for readers to finish, new readers might request the
2945
block. But since we set block->status|= BLOCK_REASSIGNED, they
2946
will wait on block->wqueue[COND_FOR_SAVED]. They must be signalled
2949
block->status|= BLOCK_REASSIGNED;
2950
wait_for_readers(keycache, block);
2952
The block must not have been freed by another thread. Repeat some
2953
checks. An additional requirement is that it must be read now
2956
assert(block->hash_link && block->hash_link->block == block);
2957
assert((block->status & (BLOCK_READ | BLOCK_IN_USE |
2958
BLOCK_REASSIGNED)) &&
2959
!(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
2960
BLOCK_IN_FLUSH | BLOCK_CHANGED |
2961
BLOCK_FOR_UPDATE)));
2962
assert(block->prev_changed && *block->prev_changed == block);
2963
assert(!block->prev_used);
2965
Unset BLOCK_REASSIGNED again. If we hand the block to an evicting
2966
thread (through unreg_request() below), other threads must not see
2967
this flag. They could become confused.
2969
block->status&= ~BLOCK_REASSIGNED;
2971
Do not release the hash_link until the block is off all lists.
2972
At least not if we hand it over for eviction in unreg_request().
2977
Unregister the block request and link the block into the LRU ring.
2978
This enables eviction for the block. If the LRU ring was empty and
2979
threads are waiting for a block, then the block will be handed over
2980
for eviction immediately. Otherwise we will unlink it from the LRU
2981
ring again, without releasing the lock in between. So decrementing
2982
the request counter and updating statistics are the only relevant
2983
operation in this case. Assert that there are no other requests
2986
assert(block->requests == 1);
2987
unreg_request(keycache, block, 0);
2989
Note that even without releasing the cache lock it is possible that
2990
the block is immediately selected for eviction by link_block() and
2991
thus not added to the LRU ring. In this case we must not touch the
2994
if (block->status & BLOCK_IN_EVICTION)
2997
/* Here the block must be in the LRU ring. Unlink it again. */
2998
assert(block->next_used && block->prev_used &&
2999
*block->prev_used == block);
3000
unlink_block(keycache, block);
3001
if (block->temperature == BLOCK_WARM)
3002
keycache->warm_blocks--;
3003
block->temperature= BLOCK_COLD;
3005
/* Remove from file_blocks hash. */
3006
unlink_changed(block);
3008
/* Remove reference to block from hash table. */
3009
unlink_hash(keycache, block->hash_link);
3010
block->hash_link= NULL;
3014
block->offset= keycache->key_cache_block_size;
3016
/* Enforced by unlink_changed(), but just to be sure. */
3017
assert(!block->next_changed && !block->prev_changed);
3018
/* Enforced by unlink_block(): not in LRU ring nor in free_block_list. */
3019
assert(!block->next_used && !block->prev_used);
3020
/* Insert the free block in the free list. */
3021
block->next_used= keycache->free_block_list;
3022
keycache->free_block_list= block;
3023
/* Keep track of the number of currently unused blocks. */
3024
keycache->blocks_unused++;
3026
/* All pending requests for this page must be resubmitted. */
3027
release_whole_queue(&block->wqueue[COND_FOR_SAVED]);
3031
static int cmp_sec_link(BLOCK_LINK **a, BLOCK_LINK **b)
3033
return (((*a)->hash_link->diskpos < (*b)->hash_link->diskpos) ? -1 :
3034
((*a)->hash_link->diskpos > (*b)->hash_link->diskpos) ? 1 : 0);
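/*
  A minimal sketch of the same ordering expressed against the standard
  qsort() comparator signature (const void * arguments), assuming the
  BLOCK_LINK and HASH_LINK definitions from earlier in this file; it avoids
  the (qsort_cmp) function-pointer cast used above.
*/
#include <stdlib.h>

static int demo_cmp_sec_link(const void *pa, const void *pb)
{
  const BLOCK_LINK *a= *(BLOCK_LINK *const *) pa;
  const BLOCK_LINK *b= *(BLOCK_LINK *const *) pb;
  if (a->hash_link->diskpos < b->hash_link->diskpos) return -1;
  if (a->hash_link->diskpos > b->hash_link->diskpos) return 1;
  return 0;
}

/* Usage: qsort(cache, count, sizeof(*cache), demo_cmp_sec_link); */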
3039
Flush a portion of changed blocks to disk,
3040
free used blocks if requested
3043
static int flush_cached_blocks(KEY_CACHE *keycache,
3044
File file, BLOCK_LINK **cache,
3046
enum flush_type type)
3050
uint32_t count= (uint) (end-cache);
3052
/* Don't lock the cache during the flush */
3053
keycache_pthread_mutex_unlock(&keycache->cache_lock);
3055
As all blocks referred in 'cache' are marked by BLOCK_IN_FLUSH
3056
we are guaranteed no thread will change them
3058
my_qsort((unsigned char*) cache, count, sizeof(*cache), (qsort_cmp) cmp_sec_link);
3060
keycache_pthread_mutex_lock(&keycache->cache_lock);
3062
Note: Do not break the loop. We have registered a request on every
3063
block in 'cache'. These must be unregistered by free_block() or
3066
for ( ; cache != end ; cache++)
3068
BLOCK_LINK *block= *cache;
3070
If the block contents is going to be changed, we abandon the flush
3071
for this block. flush_key_blocks_int() will restart its search and
3072
handle the block properly.
3074
if (!(block->status & BLOCK_FOR_UPDATE))
3076
/* Blocks coming here must have a certain status. */
3077
assert(block->hash_link);
3078
assert(block->hash_link->block == block);
3079
assert(block->hash_link->file == file);
3080
assert((block->status & ~BLOCK_IN_EVICTION) ==
3081
(BLOCK_READ | BLOCK_IN_FLUSH | BLOCK_CHANGED | BLOCK_IN_USE));
3082
block->status|= BLOCK_IN_FLUSHWRITE;
3083
keycache_pthread_mutex_unlock(&keycache->cache_lock);
3084
error= (pwrite(file,
3085
block->buffer+block->offset,
3086
block->length - block->offset,
3087
block->hash_link->diskpos+ block->offset) == 0);
3088
keycache_pthread_mutex_lock(&keycache->cache_lock);
3089
keycache->global_cache_write++;
3092
block->status|= BLOCK_ERROR;
3094
last_errno= errno ? errno : -1;
3096
block->status&= ~BLOCK_IN_FLUSHWRITE;
3097
/* Block must not have changed status except BLOCK_FOR_UPDATE. */
3098
assert(block->hash_link);
3099
assert(block->hash_link->block == block);
3100
assert(block->hash_link->file == file);
3101
assert((block->status & ~(BLOCK_FOR_UPDATE | BLOCK_IN_EVICTION)) ==
3102
(BLOCK_READ | BLOCK_IN_FLUSH | BLOCK_CHANGED | BLOCK_IN_USE));
3104
Set correct status and link in right queue for free or later use.
3105
free_block() must not see BLOCK_CHANGED and it may need to wait
3106
for readers of the block. These should not see the block in the
3107
wrong hash. If not freeing the block, we need to have it in the
3110
link_to_file_list(keycache, block, file, 1);
3113
block->status&= ~BLOCK_IN_FLUSH;
3115
Let possible waiting requests to write to the block page proceed.
3116
It might happen only during an operation to resize the key cache.
3118
release_whole_queue(&block->wqueue[COND_FOR_SAVED]);
3119
/* type will never be FLUSH_IGNORE_CHANGED here */
3120
if (!(type == FLUSH_KEEP || type == FLUSH_FORCE_WRITE) &&
3121
!(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
3125
Note that a request has been registered against the block in
3126
flush_key_blocks_int().
3128
free_block(keycache, block);
3133
Link the block into the LRU ring if it's the last submitted
3134
request for the block. This enables eviction for the block.
3135
Note that a request has been registered against the block in
3136
flush_key_blocks_int().
3138
unreg_request(keycache, block, 1);
3141
} /* end of for ( ; cache != end ; cache++) */
3147
flush all key blocks for a file to disk, but don't do any mutex locks.
3150
flush_key_blocks_int()
3151
keycache pointer to a key cache data structure
3152
file handler for the file to flush to
3153
flush_type type of the flush
3156
This function doesn't do any mutex locks because it needs to be called both
3157
from flush_key_blocks and flush_all_key_blocks (the latter does the
3158
mutex lock in the resize_key_cache() function).
3160
We do only care about changed blocks that exist when the function is
3161
entered. We do not guarantee that all changed blocks of the file are
3162
flushed if more blocks change while this function is running.
3169
static int flush_key_blocks_int(KEY_CACHE *keycache,
3170
File file, enum flush_type type)
3172
BLOCK_LINK *cache_buff[FLUSH_CACHE],**cache;
3177
if (keycache->disk_blocks > 0 &&
3178
(!my_disable_flush_key_blocks || type != FLUSH_KEEP))
3180
/* Key cache exists and flush is not disabled */
3182
uint32_t count= FLUSH_CACHE;
3183
BLOCK_LINK **pos,**end;
3184
BLOCK_LINK *first_in_switch= NULL;
3185
BLOCK_LINK *last_in_flush;
3186
BLOCK_LINK *last_for_update;
3187
BLOCK_LINK *block, *next;
3189
if (type != FLUSH_IGNORE_CHANGED)
3192
Count how many key blocks we have to cache to be able
3193
to flush all dirty pages with minimum seek moves
3196
for (block= keycache->changed_blocks[FILE_HASH(file)] ;
3198
block= block->next_changed)
3200
if ((block->hash_link->file == file) &&
3201
!(block->status & BLOCK_IN_FLUSH))
3204
assert(count<= keycache->blocks_used);
3208
Allocate a new buffer only if it is bigger than the one we have.
3209
Assure that we always have some entries for the case that new
3210
changed blocks appear while we need to wait for something.
3212
if ((count > FLUSH_CACHE) &&
3213
!(cache= (BLOCK_LINK**) my_malloc(sizeof(BLOCK_LINK*)*count,
3217
After a restart there could be more changed blocks than now.
3218
So we should not let count become smaller than the fixed buffer.
3220
if (cache == cache_buff)
3224
/* Retrieve the blocks and write them to a buffer to be flushed */
3226
last_in_flush= NULL;
3227
last_for_update= NULL;
3228
end= (pos= cache)+count;
3229
for (block= keycache->changed_blocks[FILE_HASH(file)] ;
3233
next= block->next_changed;
3234
if (block->hash_link->file == file)
3236
if (!(block->status & (BLOCK_IN_FLUSH | BLOCK_FOR_UPDATE)))
3239
Note: The special handling of BLOCK_IN_SWITCH is obsolete
3240
since we set BLOCK_IN_FLUSH if the eviction includes a
3241
flush. It can be removed in a later version.
3243
if (!(block->status & BLOCK_IN_SWITCH))
3246
We care only for the blocks for which flushing was not
3247
initiated by another thread and which are not in eviction.
3248
Registering a request on the block unlinks it from the LRU
3249
ring and protects against eviction.
3251
reg_requests(keycache, block, 1);
3252
if (type != FLUSH_IGNORE_CHANGED)
3254
/* It's not a temporary file */
3258
This should happen relatively seldom. Remove the
3259
request because we won't do anything with the block
3260
but restart and pick it again in the next iteration.
3262
unreg_request(keycache, block, 0);
3264
This happens only if there is not enough
3265
memory for the big block
3267
if ((error= flush_cached_blocks(keycache, file, cache,
3270
/* Do not loop infinitely trying to flush in vain. */
3271
if ((last_errno == error) && (++last_errcnt > 5))
3276
Restart the scan as some other thread might have changed
3277
the changed blocks chain: the blocks that were in switch
3278
state before the flush started have to be excluded
3283
Mark the block with BLOCK_IN_FLUSH in order not to let
3284
other threads to use it for new pages and interfere with
3285
our sequence of flushing dirty file pages. We must not
3286
set this flag before actually putting the block on the
3287
write burst array called 'cache'.
3289
block->status|= BLOCK_IN_FLUSH;
3290
/* Add block to the array for a write burst. */
3295
/* It's a temporary file */
3296
assert(!(block->status & BLOCK_REASSIGNED));
3299
free_block() must not be called with BLOCK_CHANGED. Note
3300
that we must not change the BLOCK_CHANGED flag outside of
3301
link_to_file_list() so that it is always in the correct
3302
queue and the *blocks_changed counters are correct.
3304
link_to_file_list(keycache, block, file, 1);
3305
if (!(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH)))
3307
/* A request has been registered against the block above. */
3308
free_block(keycache, block);
3313
Link the block into the LRU ring if it's the last
3314
submitted request for the block. This enables eviction
3315
for the block. A request has been registered against
3318
unreg_request(keycache, block, 1);
3325
Link the block into a list of blocks 'in switch'.
3327
WARNING: Here we introduce a place where a changed block
3328
is not in the changed_blocks hash! This is acceptable for
3329
a BLOCK_IN_SWITCH. Never try this for another situation.
3330
Other parts of the key cache code rely on changed blocks
3331
being in the changed_blocks hash.
3333
unlink_changed(block);
3334
link_changed(block, &first_in_switch);
3337
else if (type != FLUSH_KEEP)
3340
During the normal flush at end of statement (FLUSH_KEEP) we
3341
do not need to ensure that blocks in flush or update by
3342
other threads are flushed. They will be flushed by them
3343
later. In all other cases we must assure that we do not have
3344
any changed block of this file in the cache when this
3347
if (block->status & BLOCK_IN_FLUSH)
3349
/* Remember the last block found to be in flush. */
3350
last_in_flush= block;
3354
/* Remember the last block found to be selected for update. */
3355
last_for_update= block;
3362
if ((error= flush_cached_blocks(keycache, file, cache, pos, type)))
3364
/* Do not loop infinitely trying to flush in vain. */
3365
if ((last_errno == error) && (++last_errcnt > 5))
3370
Do not restart here during the normal flush at end of statement
3371
(FLUSH_KEEP). We have now flushed at least all blocks that were
3372
changed when entering this function. In all other cases we must
3373
assure that we do not have any changed block of this file in the
3374
cache when this function returns.
3376
if (type != FLUSH_KEEP)
3382
There are no blocks to be flushed by this thread, but blocks in
3383
flush by other threads. Wait until one of the blocks is flushed.
3384
Re-check the condition for last_in_flush. We may have unlocked
3385
the cache_lock in flush_cached_blocks(). The state of the block
3388
if (last_in_flush->status & BLOCK_IN_FLUSH)
3389
wait_on_queue(&last_in_flush->wqueue[COND_FOR_SAVED],
3390
&keycache->cache_lock);
3391
/* Be sure not to lose a block. They may be flushed in random order. */
3394
if (last_for_update)
3397
There are no blocks to be flushed by this thread, but blocks for
3398
update by other threads. Wait until one of the blocks is updated.
3399
Re-check the condition for last_for_update. We may have unlocked
3400
the cache_lock in flush_cached_blocks(). The state of the block
3403
if (last_for_update->status & BLOCK_FOR_UPDATE)
3404
wait_on_queue(&last_for_update->wqueue[COND_FOR_REQUESTED],
3405
&keycache->cache_lock);
3406
/* The block is now changed. Flush it. */
3411
Wait until the list of blocks in switch is empty. The threads that
3412
are switching these blocks will relink them to clean file chains
3413
while we wait and thus empty the 'first_in_switch' chain.
3415
while (first_in_switch)
3417
wait_on_queue(&first_in_switch->wqueue[COND_FOR_SAVED],
3418
&keycache->cache_lock);
3420
Do not restart here. We have flushed all blocks that were
3421
changed when entering this function and were not marked for
3422
eviction. Other threads have now flushed all remaining blocks in
3423
the course of their eviction.
3427
if (! (type == FLUSH_KEEP || type == FLUSH_FORCE_WRITE))
3429
BLOCK_LINK *last_for_update= NULL;
3430
BLOCK_LINK *last_in_switch= NULL;
3431
uint32_t total_found= 0;
3435
Finally free all clean blocks for this file.
3436
During resize this may be run by two threads in parallel.
3441
for (block= keycache->file_blocks[FILE_HASH(file)] ;
3445
/* Remember the next block. After freeing we cannot get at it. */
3446
next= block->next_changed;
3448
/* Changed blocks cannot appear in the file_blocks hash. */
3449
assert(!(block->status & BLOCK_CHANGED));
3450
if (block->hash_link->file == file)
3452
/* We must skip blocks that will be changed. */
3453
if (block->status & BLOCK_FOR_UPDATE)
3455
last_for_update= block;
3460
We must not free blocks in eviction (BLOCK_IN_EVICTION |
3461
BLOCK_IN_SWITCH) or blocks intended to be freed
3464
if (!(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
3467
struct st_hash_link *next_hash_link= NULL;
3468
my_off_t next_diskpos= 0;
3470
uint32_t next_status= 0;
3471
uint32_t hash_requests= 0;
3475
assert(found <= keycache->blocks_used);
3478
Register a request. This unlinks the block from the LRU
3479
ring and protects it against eviction. This is required
3482
reg_requests(keycache, block, 1);
3485
free_block() may need to wait for readers of the block.
3486
This is the moment where the other thread can move the
3487
'next' block from the chain. free_block() needs to wait
3488
if there are requests for the block pending.
3490
if (next && (hash_requests= block->hash_link->requests))
3492
/* Copy values from the 'next' block and its hash_link. */
3493
next_status= next->status;
3494
next_hash_link= next->hash_link;
3495
next_diskpos= next_hash_link->diskpos;
3496
next_file= next_hash_link->file;
3497
assert(next == next_hash_link->block);
3500
free_block(keycache, block);
3502
If we had to wait and the state of the 'next' block
3503
changed, break the inner loop. 'next' may no longer be
3504
part of the current chain.
3506
We do not want to break the loop after every free_block(),
3507
not even only after waits. The chain might be quite long
3508
and contain blocks for many files. Traversing it again and
3509
again to find more blocks for this file could become quite
3512
if (next && hash_requests &&
3513
((next_status != next->status) ||
3514
(next_hash_link != next->hash_link) ||
3515
(next_file != next_hash_link->file) ||
3516
(next_diskpos != next_hash_link->diskpos) ||
3517
(next != next_hash_link->block)))
3522
last_in_switch= block;
3525
} /* end for block in file_blocks */
3529
If any clean block has been found, we may have waited for it to
3530
become free. In this case it could be possible that another clean
3531
block became dirty. This is possible if the write request existed
3532
before the flush started (BLOCK_FOR_UPDATE). Re-check the hashes.
3538
To avoid an infinite loop, wait until one of the blocks marked
3539
for update is updated.
3541
if (last_for_update)
3543
/* We did not wait. Block must not have changed status. */
3544
assert(last_for_update->status & BLOCK_FOR_UPDATE);
3545
wait_on_queue(&last_for_update->wqueue[COND_FOR_REQUESTED],
3546
&keycache->cache_lock);
3551
To avoid an infinite loop wait until one of the blocks marked
3552
for eviction is switched.
3556
/* We did not wait. Block must not have changed status. */
3557
assert(last_in_switch->status & (BLOCK_IN_EVICTION |
3560
wait_on_queue(&last_in_switch->wqueue[COND_FOR_SAVED],
3561
&keycache->cache_lock);
3565
} /* if (! (type == FLUSH_KEEP || type == FLUSH_FORCE_WRITE)) */
3567
} /* if (keycache->disk_blocks > 0 */
3570
if (cache != cache_buff)
3571
free((unsigned char*) cache);
3573
errno=last_errno; /* Return first error */
3574
return(last_errno != 0);
438
3579
Flush all blocks for a file to disk
452
3593
int flush_key_blocks(KEY_CACHE *keycache,
453
int file, enum flush_type type)
457
assert (!keycache->key_cache_inited);
3594
File file, enum flush_type type)
3598
if (!keycache->key_cache_inited)
3601
keycache_pthread_mutex_lock(&keycache->cache_lock);
3602
/* While waiting for lock, keycache could have been ended. */
3603
if (keycache->disk_blocks > 0)
3605
inc_counter_for_resize_op(keycache);
3606
res= flush_key_blocks_int(keycache, file, type);
3607
dec_counter_for_resize_op(keycache);
3609
keycache_pthread_mutex_unlock(&keycache->cache_lock);
3615
Flush all blocks in the key cache to disk.
3618
flush_all_key_blocks()
3619
keycache pointer to key cache root structure
3623
Flushing of the whole key cache is done in two phases.
3625
1. Flush all changed blocks, waiting for them if necessary. Loop
3626
until there is no changed block left in the cache.
3628
2. Free all clean blocks. Normally this means free all blocks. The
3629
changed blocks were flushed in phase 1 and became clean. However we
3630
may need to wait for blocks that are read by other threads. While we
3631
wait, a clean block could become changed if that operation started
3632
before the resize operation started. To be safe we must restart at
3635
When we can run through the changed_blocks and file_blocks hashes
3636
without finding a block any more, then we are done.
3638
Note that we hold keycache->cache_lock all the time unless we need
3639
to wait for something.
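/*
  A minimal sketch (a toy state array instead of the real hashes, hypothetical
  names) of the restart structure described above: both phases are repeated
  until a complete pass finds nothing to do, because a block can become dirty
  again while we wait.
*/
#include <stddef.h>

enum { DEMO_CLEAN, DEMO_DIRTY, DEMO_FREED };

/* One pass: phase 1 flushes dirty blocks, phase 2 frees clean ones.
   Returns how many blocks were touched. */
static size_t demo_flush_pass(int *state, size_t n)
{
  size_t i, found= 0;
  for (i= 0; i < n; i++)
    if (state[i] == DEMO_DIRTY) { state[i]= DEMO_CLEAN; found++; }
  for (i= 0; i < n; i++)
    if (state[i] == DEMO_CLEAN) { state[i]= DEMO_FREED; found++; }
  return found;
}

/* Repeat until a whole pass finds nothing, since a block may have become
   dirty again while a pass was waiting on other threads. */
static void demo_flush_all(int *state, size_t n)
{
  while (demo_flush_pass(state, n) != 0) {}
}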
3646
static int flush_all_key_blocks(KEY_CACHE *keycache)
3649
uint32_t total_found;
3655
safe_mutex_assert_owner(&keycache->cache_lock);
3659
Phase1: Flush all changed blocks, waiting for them if necessary.
3660
Loop until there is no changed block left in the cache.
3665
/* Step over the whole changed_blocks hash array. */
3666
for (idx= 0; idx < CHANGED_BLOCKS_HASH; idx++)
3669
If an array element is non-empty, use the first block from its
3670
chain to find a file for flush. All changed blocks for this
3671
file are flushed. So the same block will not appear at this
3672
place again with the next iteration. New writes for blocks are
3673
not accepted during the flush. If multiple files share the
3674
same hash bucket, one of them will be flushed per iteration
3675
of the outer loop of phase 1.
3677
if ((block= keycache->changed_blocks[idx]))
3681
Flush dirty blocks but do not free them yet. They can be used
3682
for reading until all other blocks are flushed too.
3684
if (flush_key_blocks_int(keycache, block->hash_link->file,
3693
Phase 2: Free all clean blocks. Normally this means free all
3694
blocks. The changed blocks were flushed in phase 1 and became
3695
clean. However we may need to wait for blocks that are read by
3696
other threads. While we wait, a clean block could become changed
3697
if that operation started before the resize operation started. To
3698
be safe we must restart at phase 1.
3703
/* Step over the whole file_blocks hash array. */
3704
for (idx= 0; idx < CHANGED_BLOCKS_HASH; idx++)
3707
If an array element is non-empty, use the first block from its
3708
chain to find a file for flush. All blocks for this file are
3709
freed. So the same block will not appear at this place again
3710
with the next iteration. If multiple files share the
3711
same hash bucket, one of them will be flushed per iteration
3712
of the outer loop of phase 2.
3714
if ((block= keycache->file_blocks[idx]))
3718
if (flush_key_blocks_int(keycache, block->hash_link->file,
3727
If any clean block has been found, we may have waited for it to
3728
become free. In this case it could be possible that another clean
3729
block became dirty. This is possible if the write request existed
3730
before the resize started (BLOCK_FOR_UPDATE). Re-check the hashes.
3732
} while (total_found);
3738
Reset the counters of a key cache.
3741
reset_key_cache_counters()
3742
name the name of a key cache
3743
key_cache pointer to the key cache to be reset
3746
This procedure is used by process_key_caches() to reset the counters of all
3747
currently used key caches, both the default one and the named ones.
3750
0 on success (always because it can't fail)
3753
int reset_key_cache_counters(const char *name __attribute__((unused)),
3754
KEY_CACHE *key_cache)
3756
if (!key_cache->key_cache_inited)
3760
key_cache->global_blocks_changed= 0; /* Key_blocks_not_flushed */
3761
key_cache->global_cache_r_requests= 0; /* Key_read_requests */
3762
key_cache->global_cache_read= 0; /* Key_reads */
3763
key_cache->global_cache_w_requests= 0; /* Key_write_requests */
3764
key_cache->global_cache_write= 0; /* Key_writes */
3768
#if defined(KEYCACHE_TIMEOUT)
3772
unsigned int hash_link_number(HASH_LINK *hash_link, KEY_CACHE *keycache)
3774
return ((unsigned int) (((char*)hash_link-(char *) keycache->hash_link_root)/
3775
sizeof(HASH_LINK)));
3779
unsigned int block_number(BLOCK_LINK *block, KEY_CACHE *keycache)
3781
return ((unsigned int) (((char*)block-(char *)keycache->block_root)/
3782
sizeof(BLOCK_LINK)));
3786
#define KEYCACHE_DUMP_FILE "keycache_dump.txt"
3787
#define MAX_QUEUE_LEN 100
3790
static void keycache_dump(KEY_CACHE *keycache)
3792
FILE *keycache_dump_file=fopen(KEYCACHE_DUMP_FILE, "w");
3793
struct st_my_thread_var *last;
3794
struct st_my_thread_var *thread;
3796
HASH_LINK *hash_link;
3797
KEYCACHE_PAGE *page;
3800
fprintf(keycache_dump_file, "thread:%u\n", thread->id);
3803
thread=last=waiting_for_hash_link.last_thread;
3804
fprintf(keycache_dump_file, "queue of threads waiting for hash link\n");
3808
thread=thread->next;
3809
page= (KEYCACHE_PAGE *) thread->opt_info;
3810
fprintf(keycache_dump_file,
3811
"thread:%u, (file,filepos)=(%u,%lu)\n",
3812
thread->id,(uint) page->file,(uint32_t) page->filepos);
3813
if (++i == MAX_QUEUE_LEN)
3816
while (thread != last);
3819
thread=last=waiting_for_block.last_thread;
3820
fprintf(keycache_dump_file, "queue of threads waiting for block\n");
3824
thread=thread->next;
3825
hash_link= (HASH_LINK *) thread->opt_info;
3826
fprintf(keycache_dump_file,
3827
"thread:%u hash_link:%u (file,filepos)=(%u,%u)\n",
3828
thread->id, (uint) hash_link_number(hash_link, keycache),
3829
(uint) hash_link->file,(uint32_t) hash_link->diskpos);
3830
if (++i == MAX_QUEUE_LEN)
3833
while (thread != last);
3835
for (i=0 ; i< keycache->blocks_used ; i++)
3838
block= &keycache->block_root[i];
3839
hash_link= block->hash_link;
3840
fprintf(keycache_dump_file,
3841
"block:%u hash_link:%d status:%x #requests=%u "
3842
"waiting_for_readers:%d\n",
3843
i, (int) (hash_link ? hash_link_number(hash_link, keycache) : -1),
3844
block->status, block->requests, block->condvar ? 1 : 0);
3845
for (j=0 ; j < 2; j++)
3847
KEYCACHE_WQUEUE *wqueue=&block->wqueue[j];
3848
thread= last= wqueue->last_thread;
3849
fprintf(keycache_dump_file, "queue #%d\n", j);
3854
thread=thread->next;
3855
fprintf(keycache_dump_file,
3856
"thread:%u\n", thread->id);
3857
if (++i == MAX_QUEUE_LEN)
3860
while (thread != last);
3864
fprintf(keycache_dump_file, "LRU chain:");
3865
block= keycache->used_last;
3870
block= block->next_used;
3871
fprintf(keycache_dump_file,
3872
"block:%u, ", block_number(block, keycache));
3874
while (block != keycache->used_last);
3876
fprintf(keycache_dump_file, "\n");
3878
fclose(keycache_dump_file);
3881
static int keycache_pthread_cond_wait(pthread_cond_t *cond,
3882
pthread_mutex_t *mutex)
3885
struct timeval now; /* time when we started waiting */
3886
struct timespec timeout; /* timeout value for the wait function */
3889
/* Get current time */
3890
gettimeofday(&now, &tz);
3891
/* Prepare timeout value */
3892
timeout.tv_sec= now.tv_sec + KEYCACHE_TIMEOUT;
3894
timeval uses microseconds.
3895
timespec uses nanoseconds.
3896
1 microsecond = 1000 nanoseconds
3898
timeout.tv_nsec= now.tv_usec * 1000;
3899
rc= pthread_cond_timedwait(cond, mutex, &timeout);
3900
if (rc == ETIMEDOUT || rc == ETIME)
3905
assert(rc != ETIMEDOUT);
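/*
  A minimal sketch (hypothetical helper) of computing the same absolute
  timeout with clock_gettime(), which fills a struct timespec directly and
  avoids converting microseconds to nanoseconds by hand.
*/
#include <time.h>

static void demo_abs_timeout(struct timespec *ts, time_t seconds)
{
  clock_gettime(CLOCK_REALTIME, ts);  /* pthread_cond_timedwait() measures
                                         against CLOCK_REALTIME by default */
  ts->tv_sec+= seconds;
}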
3908
#endif /* defined(KEYCACHE_TIMEOUT) */