~drizzle-trunk/drizzle/development

1 by brian
clean slate
1
/* Copyright (C) 2000 MySQL AB
2
3
   This program is free software; you can redistribute it and/or modify
4
   it under the terms of the GNU General Public License as published by
5
   the Free Software Foundation; version 2 of the License.
6
7
   This program is distributed in the hope that it will be useful,
8
   but WITHOUT ANY WARRANTY; without even the implied warranty of
9
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10
   GNU General Public License for more details.
11
12
   You should have received a copy of the GNU General Public License
13
   along with this program; if not, write to the Free Software
14
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
15
16
/*
17
  These functions handle keyblock caching for ISAM and MyISAM tables.
18
19
  One cache can handle many files.
20
  It must contain buffers of the same blocksize.
21
  init_key_cache() should be used to init cache handler.
22
23
  The free list (free_block_list) is a stack like structure.
24
  When a block is freed by free_block(), it is pushed onto the stack.
25
  When a new block is required it is first tried to pop one from the stack.
26
  If the stack is empty, it is tried to get a never-used block from the pool.
27
  If this is empty too, then a block is taken from the LRU ring, flushing it
28
  to disk, if necessary. This is handled in find_key_block().
29
  With the new free list, the blocks can have three temperatures:
30
  hot, warm and cold (which is free). This is remembered in the block header
31
  by the enum BLOCK_TEMPERATURE temperature variable. Remembering the
32
  temperature is necessary to correctly count the number of warm blocks,
33
  which is required to decide when blocks are allowed to become hot. Whenever
34
  a block is inserted to another (sub-)chain, we take the old and new
35
  temperature into account to decide if we got one more or less warm block.
36
  blocks_unused is the sum of never used blocks in the pool and of currently
37
  free blocks. blocks_used is the number of blocks fetched from the pool and
38
  as such gives the maximum number of in-use blocks at any time.
39
40
  Key Cache Locking
41
  =================
42
43
  All key cache locking is done with a single mutex per key cache:
44
  keycache->cache_lock. This mutex is locked almost all the time
45
  when executing code in this file (mf_keycache.c).
46
  However it is released for I/O and some copy operations.
47
48
  The cache_lock is also released when waiting for some event. Waiting
49
  and signalling is done via condition variables. In most cases the
50
  thread waits on its thread->suspend condition variable. Every thread
51
  has a my_thread_var structure, which contains this variable and a
52
  '*next' and '**prev' pointer. These pointers are used to insert the
53
  thread into a wait queue.
54
55
  A thread can wait for one block and thus be in one wait queue at a
56
  time only.
57
58
  Before starting to wait on its condition variable with
59
  pthread_cond_wait(), the thread enters itself to a specific wait queue
60
  with link_into_queue() (double linked with '*next' + '**prev') or
61
  wait_on_queue() (single linked with '*next').
62
63
  Another thread, when releasing a resource, looks up the waiting thread
64
  in the related wait queue. It sends a signal with
65
  pthread_cond_signal() to the waiting thread.
66
67
  NOTE: Depending on the particular wait situation, either the sending
68
  thread removes the waiting thread from the wait queue with
69
  unlink_from_queue() or release_whole_queue() respectively, or the waiting
70
  thread removes itself.
71
72
  There is one exception from this locking scheme when one thread wants
73
  to reuse a block for some other address. This works by first marking
74
  the block reserved (status= BLOCK_IN_SWITCH) and then waiting for all
75
  threads that are reading the block to finish. Each block has a
76
  reference to a condition variable (condvar). It holds a reference to
77
  the thread->suspend condition variable for the waiting thread (if such
78
  a thread exists). When that thread is signaled, the reference is
79
  cleared. The number of readers of a block is registered in
80
  block->hash_link->requests. See wait_for_readers() / remove_reader()
81
  for details. This is similar to the above, but it clearly means that
82
  only one thread can wait for a particular block. There is no queue in
83
  this case. Strangely enough block->condvar is used for waiting for the
84
  assigned hash_link only. More precisely it is used to wait for all
85
  requests to be unregistered from the assigned hash_link.
86
87
  The resize_queue serves two purposes:
88
  1. Threads that want to do a resize wait there if in_resize is set.
89
     This is not used in the server. The server refuses a second resize
90
     request if one is already active. keycache->in_init is used for the
91
     synchronization. See set_var.cc.
92
  2. Threads that want to access blocks during resize wait here during
93
     the re-initialization phase.
94
  When the resize is done, all threads on the queue are signalled.
95
  Hypothetical resizers can compete for resizing, and read/write
96
  requests will restart to request blocks from the freshly resized
97
  cache. If the cache has been resized too small, it is disabled and
98
  'can_be_used' is false. In this case read/write requests bypass the
99
  cache. Since they increment and decrement 'cnt_for_resize_op', the
100
  next resizer can wait on the queue 'waiting_for_resize_cnt' until all
101
  I/O finished.
102
*/
103
212.5.39 by Monty Taylor
Phew. Moved my_base and my_global.
104
#include <drizzled/global.h>
212.5.13 by Monty Taylor
Moved my_sys/my_pthread/my_nosys and mysys_err to mysys.
105
#include <mysys/mysys_err.h>
106
#include <mysys/my_sys.h>
1 by brian
clean slate
107
#include <keycache.h>
212.5.18 by Monty Taylor
Moved m_ctype, m_string and my_bitmap. Removed t_ctype.
108
#include <mystrings/m_string.h>
212.5.28 by Monty Taylor
Moved my_bit and my_list
109
#include <mysys/my_bit.h>
1 by brian
clean slate
110
#include <errno.h>
111
#include <stdarg.h>
112
113
/*
114
  Some compilation flags have been added specifically for this module
115
  to control the following:
116
  - not to let a thread to yield the control when reading directly
117
    from key cache, which might improve performance in many cases;
118
    to enable this add:
119
    #define SERIALIZED_READ_FROM_CACHE
120
  - to set an upper bound for number of threads simultaneously
121
    using the key cache; this setting helps to determine an optimal
122
    size for hash table and improve performance when the number of
123
    blocks in the key cache much less than the number of threads
124
    accessing it;
125
    to set this number equal to <N> add
126
      #define MAX_THREADS <N>
127
  - to substitute calls of pthread_cond_wait for calls of
128
    pthread_cond_timedwait (wait with timeout set up);
129
    this setting should be used only when you want to trap a deadlock
130
    situation, which theoretically should not happen;
131
    to set timeout equal to <T> seconds add
132
      #define KEYCACHE_TIMEOUT <T>
133
134
  Example of the settings:
135
    #define SERIALIZED_READ_FROM_CACHE
136
    #define MAX_THREADS   100
137
    #define KEYCACHE_TIMEOUT  1
138
*/
139
140
/* Recover a pointer to an enclosing structure from a pointer to one of its
   members (classic container_of idiom). */
#define STRUCT_PTR(TYPE, MEMBER, a)                                           \
          (TYPE *) ((char *) (a) - offsetof(TYPE, MEMBER))

/* types of condition variables */
#define  COND_FOR_REQUESTED 0
#define  COND_FOR_SAVED     1

typedef pthread_cond_t KEYCACHE_CONDVAR;
148
149
/* descriptor of the page in the key cache block buffer */
150
struct st_keycache_page
151
{
152
  int file;               /* file to which the page belongs to  */
153
  my_off_t filepos;       /* position of the page in the file   */
154
};
155
156
/* element in the chain of a hash table bucket */
157
struct st_hash_link
158
{
159
  struct st_hash_link *next, **prev; /* to connect links in the same bucket  */
160
  struct st_block_link *block;       /* reference to the block for the page: */
161
  File file;                         /* from such a file                     */
162
  my_off_t diskpos;                  /* with such an offset                  */
482 by Brian Aker
Remove uint.
163
  uint32_t requests;                     /* number of requests for the page      */
1 by brian
clean slate
164
};
165
166
/* Block status bits (combined in st_block_link::status). */
#define BLOCK_ERROR           1 /* an error occurred when performing file i/o */
#define BLOCK_READ            2 /* file block is in the block buffer         */
#define BLOCK_IN_SWITCH       4 /* block is preparing to read new page       */
#define BLOCK_REASSIGNED      8 /* blk does not accept requests for old page */
#define BLOCK_IN_FLUSH       16 /* block is selected for flush               */
#define BLOCK_CHANGED        32 /* block buffer contains a dirty page        */
#define BLOCK_IN_USE         64 /* block is not free                         */
#define BLOCK_IN_EVICTION   128 /* block is selected for eviction            */
#define BLOCK_IN_FLUSHWRITE 256 /* block is in write to file                 */
#define BLOCK_FOR_UPDATE    512 /* block is selected for buffer modification */

/* page status, returned by find_key_block */
#define PAGE_READ               0
#define PAGE_TO_BE_READ         1
#define PAGE_WAIT_TO_BE_READ    2

/* block temperature determines in which (sub-)chain the block currently is */
enum BLOCK_TEMPERATURE { BLOCK_COLD /*free*/ , BLOCK_WARM , BLOCK_HOT };
185
186
/* key cache block */
187
struct st_block_link
188
{
189
  struct st_block_link
190
    *next_used, **prev_used;   /* to connect links in the LRU chain (ring)   */
191
  struct st_block_link
192
    *next_changed, **prev_changed; /* for lists of file dirty/clean blocks   */
193
  struct st_hash_link *hash_link; /* backward ptr to referring hash_link     */
194
  KEYCACHE_WQUEUE wqueue[2]; /* queues on waiting requests for new/old pages */
482 by Brian Aker
Remove uint.
195
  uint32_t requests;          /* number of requests for the block                */
481 by Brian Aker
Remove all of uchar.
196
  unsigned char *buffer;           /* buffer for the block page                       */
482 by Brian Aker
Remove uint.
197
  uint32_t offset;            /* beginning of modified data in the buffer        */
198
  uint32_t length;            /* end of data in the buffer                       */
199
  uint32_t status;            /* state of the block                              */
1 by brian
clean slate
200
  enum BLOCK_TEMPERATURE temperature; /* block temperature: cold, warm, hot */
482 by Brian Aker
Remove uint.
201
  uint32_t hits_left;         /* number of hits left until promotion             */
151 by Brian Aker
Ulonglong to uint64_t
202
  uint64_t last_hit_time; /* timestamp of the last hit                      */
1 by brian
clean slate
203
  KEYCACHE_CONDVAR *condvar; /* condition variable for 'no readers' event    */
204
};
205
206
KEY_CACHE dflt_key_cache_var;
207
KEY_CACHE *dflt_key_cache= &dflt_key_cache_var;
208
209
#define FLUSH_CACHE         2000            /* sort this many blocks at once */
210
211
static int flush_all_key_blocks(KEY_CACHE *keycache);
212
static void wait_on_queue(KEYCACHE_WQUEUE *wqueue,
213
                          pthread_mutex_t *mutex);
214
static void release_whole_queue(KEYCACHE_WQUEUE *wqueue);
215
static void free_block(KEY_CACHE *keycache, BLOCK_LINK *block);
216
217
#define KEYCACHE_HASH(f, pos)                                                 \
303 by Brian Aker
First pass in removing ulong from MyISAM
218
(((uint32_t) ((pos) / keycache->key_cache_block_size) +                          \
219
                                     (uint32_t) (f)) & (keycache->hash_entries-1))
1 by brian
clean slate
220
#define FILE_HASH(f)                 ((uint) (f) & (CHANGED_BLOCKS_HASH-1))
221
222
51.1.123 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
223
#ifdef KEYCACHE_TIMEOUT
1 by brian
clean slate
224
static int keycache_pthread_cond_wait(pthread_cond_t *cond,
225
                                      pthread_mutex_t *mutex);
226
#else
227
#define  keycache_pthread_cond_wait pthread_cond_wait
228
#endif
229
230
#define keycache_pthread_mutex_lock pthread_mutex_lock
231
#define keycache_pthread_mutex_unlock pthread_mutex_unlock
232
#define keycache_pthread_cond_signal pthread_cond_signal
233
482 by Brian Aker
Remove uint.
234
/*
  Return the smallest power of two greater than or equal to value,
  doubled once more; used to size the hash table relative to the
  number of cache blocks.
  Fixed: cast to uint32_t; the project removed the legacy `uint` type.
*/
static inline uint32_t next_power(uint32_t value)
{
  return (uint32_t) my_round_up_to_next_power((uint32_t) value) << 1;
}
238
239
240
/*
241
  Initialize a key cache
242
243
  SYNOPSIS
244
    init_key_cache()
245
    keycache			pointer to a key cache data structure
246
    key_cache_block_size	size of blocks to keep cached data
247
    use_mem                 	total memory to use for the key cache
248
    division_limit		division limit (may be zero)
249
    age_threshold		age threshold (may be zero)
250
251
  RETURN VALUE
252
    number of blocks in the key cache, if successful,
253
    0 - otherwise.
254
255
  NOTES.
256
    if keycache->key_cache_inited != 0 we assume that the key cache
257
    is already initialized.  This is for now used by myisamchk, but shouldn't
258
    be something that a program should rely on!
259
260
    It's assumed that no two threads call this function simultaneously
261
    referring to the same key cache handle.
262
263
*/
264
482 by Brian Aker
Remove uint.
265
int init_key_cache(KEY_CACHE *keycache, uint32_t key_cache_block_size,
                   size_t use_mem, uint32_t division_limit,
                   uint32_t age_threshold)
{
  uint32_t blocks, hash_links;
  size_t length;
  int error;
  assert(key_cache_block_size >= 512);

  if (keycache->key_cache_inited && keycache->disk_blocks > 0)
  {
    /* Already initialized and enabled; nothing to do. */
    return(0);
  }

  keycache->global_cache_w_requests= keycache->global_cache_r_requests= 0;
  keycache->global_cache_read= keycache->global_cache_write= 0;
  keycache->disk_blocks= -1;
  if (! keycache->key_cache_inited)
  {
    keycache->key_cache_inited= 1;
    /*
      Initialize these variables once only.
      Their value must survive re-initialization during resizing.
    */
    keycache->in_resize= 0;
    keycache->resize_in_flush= 0;
    keycache->cnt_for_resize_op= 0;
    keycache->waiting_for_resize_cnt.last_thread= NULL;
    keycache->in_init= 0;
    pthread_mutex_init(&keycache->cache_lock, MY_MUTEX_INIT_FAST);
    keycache->resize_queue.last_thread= NULL;
  }

  keycache->key_cache_mem_size= use_mem;
  keycache->key_cache_block_size= key_cache_block_size;

  /*
    Estimate how many blocks fit in use_mem, accounting for the block
    buffer itself plus its BLOCK_LINK, two HASH_LINKs and a share of the
    hash table (5/4 pointer per block).
  */
  blocks= (uint32_t) (use_mem / (sizeof(BLOCK_LINK) + 2 * sizeof(HASH_LINK) +
                              sizeof(HASH_LINK*) * 5/4 + key_cache_block_size));
  /* It doesn't make sense to have too few blocks (less than 8) */
  if (blocks >= 8)
  {
    for ( ; ; )
    {
      /* Set my_hash_entries to the next bigger 2 power */
      if ((keycache->hash_entries= next_power(blocks)) < blocks * 5/4)
        keycache->hash_entries<<= 1;
      hash_links= 2 * blocks;
#if defined(MAX_THREADS)
      if (hash_links < MAX_THREADS + blocks - 1)
        hash_links= MAX_THREADS + blocks - 1;
#endif
      /* Shrink the block count until all structures fit in use_mem. */
      while ((length= (ALIGN_SIZE(blocks * sizeof(BLOCK_LINK)) +
                       ALIGN_SIZE(hash_links * sizeof(HASH_LINK)) +
                       ALIGN_SIZE(sizeof(HASH_LINK*) *
                                  keycache->hash_entries))) +
             ((size_t) blocks * keycache->key_cache_block_size) > use_mem)
        blocks--;
      /* Allocate memory for cache page buffers */
      if ((keycache->block_mem= malloc((size_t) blocks * keycache->key_cache_block_size)))
      {
        /*
          Allocate memory for blocks, hash_links and hash entries;
          For each block 2 hash links are allocated.
          Fixed: use malloc() here — this pointer is released with plain
          free() in the error path below and in end_key_cache(), so it
          must not come from my_malloc().
        */
        if ((keycache->block_root= (BLOCK_LINK*) malloc(length)))
          break;
        free(keycache->block_mem);
        keycache->block_mem= 0;
      }
      if (blocks < 8)
      {
        my_errno= ENOMEM;
        my_error(EE_OUTOFMEMORY, MYF(0), blocks * keycache->key_cache_block_size);
        goto err;
      }
      /* Retry with 3/4 of the blocks. */
      blocks= blocks / 4*3;
    }
    keycache->blocks_unused= blocks;
    keycache->disk_blocks= (int) blocks;
    keycache->hash_links= hash_links;
    /* hash_root and hash_link_root are carved out of the block_root chunk. */
    keycache->hash_root= (HASH_LINK**) ((char*) keycache->block_root +
                                        ALIGN_SIZE(blocks*sizeof(BLOCK_LINK)));
    keycache->hash_link_root= (HASH_LINK*) ((char*) keycache->hash_root +
                                            ALIGN_SIZE((sizeof(HASH_LINK*) *
                                                        keycache->hash_entries)));
    memset(keycache->block_root, 0,
           keycache->disk_blocks * sizeof(BLOCK_LINK));
    memset(keycache->hash_root, 0,
           keycache->hash_entries * sizeof(HASH_LINK*));
    memset(keycache->hash_link_root, 0,
           keycache->hash_links * sizeof(HASH_LINK));
    keycache->hash_links_used= 0;
    keycache->free_hash_list= NULL;
    keycache->blocks_used= keycache->blocks_changed= 0;

    keycache->global_blocks_changed= 0;
    keycache->blocks_available=0;		/* For debugging */

    /* The LRU chain is empty after initialization */
    keycache->used_last= NULL;
    keycache->used_ins= NULL;
    keycache->free_block_list= NULL;
    keycache->keycache_time= 0;
    keycache->warm_blocks= 0;
    keycache->min_warm_blocks= (division_limit ?
                                blocks * division_limit / 100 + 1 :
                                blocks);
    keycache->age_threshold= (age_threshold ?
                              blocks * age_threshold / 100 :
                              blocks);

    keycache->can_be_used= 1;

    keycache->waiting_for_hash_link.last_thread= NULL;
    keycache->waiting_for_block.last_thread= NULL;
    memset(keycache->changed_blocks, 0,
           sizeof(keycache->changed_blocks[0]) * CHANGED_BLOCKS_HASH);
    memset(keycache->file_blocks, 0,
           sizeof(keycache->file_blocks[0]) * CHANGED_BLOCKS_HASH);
  }
  else
  {
    /* key_buffer_size is specified too small. Disable the cache. */
    keycache->can_be_used= 0;
  }

  keycache->blocks= keycache->disk_blocks > 0 ? keycache->disk_blocks : 0;
  return((int) keycache->disk_blocks);

err:
  error= my_errno;
  keycache->disk_blocks= 0;
  keycache->blocks=  0;
  if (keycache->block_mem)
  {
    free(keycache->block_mem);
    keycache->block_mem= NULL;
  }
  if (keycache->block_root)
  {
    free((unsigned char*) keycache->block_root);
    keycache->block_root= NULL;
  }
  my_errno= error;
  keycache->can_be_used= 0;
  return(0);
}
413
414
415
/*
416
  Resize a key cache
417
418
  SYNOPSIS
419
    resize_key_cache()
420
    keycache     	        pointer to a key cache data structure
421
    key_cache_block_size        size of blocks to keep cached data
422
    use_mem			total memory to use for the new key cache
423
    division_limit		new division limit (if not zero)
424
    age_threshold		new age threshold (if not zero)
425
426
  RETURN VALUE
427
    number of blocks in the key cache, if successful,
428
    0 - otherwise.
429
430
  NOTES.
431
    The function first compares the memory size and the block size parameters
432
    with the key cache values.
433
434
    If they differ the function frees the memory allocated for the
435
    old key cache blocks by calling the end_key_cache function and
436
    then rebuilds the key cache with new blocks by calling
437
    init_key_cache.
438
439
    The function starts the operation only when all other threads
440
    performing operations with the key cache allow it to proceed
441
    (when cnt_for_resize=0).
442
*/
443
482 by Brian Aker
Remove uint.
444
int resize_key_cache(KEY_CACHE *keycache, uint32_t key_cache_block_size,
                     size_t use_mem, uint32_t division_limit,
                     uint32_t age_threshold)
{
  int blocks;

  if (!keycache->key_cache_inited)
    return keycache->disk_blocks;

  if (key_cache_block_size == keycache->key_cache_block_size &&
      use_mem == keycache->key_cache_mem_size)
  {
    /* Geometry unchanged: only the tuning parameters need updating. */
    change_key_cache_param(keycache, division_limit, age_threshold);
    return keycache->disk_blocks;
  }

  keycache_pthread_mutex_lock(&keycache->cache_lock);

  /*
    We may need to wait for another thread which is doing a resize
    already. This cannot happen in the MySQL server though. It allows
    one resizer only. In set_var.cc keycache->in_init is used to block
    multiple attempts.
  */
  while (keycache->in_resize)
  {
    /* purecov: begin inspected */
    wait_on_queue(&keycache->resize_queue, &keycache->cache_lock);
    /* purecov: end */
  }

  /*
    Mark the operation in progress. This blocks other threads from doing
    a resize in parallel. It prohibits new blocks to enter the cache.
    Read/write requests can bypass the cache during the flush phase.
  */
  keycache->in_resize= 1;

  /* Need to flush only if keycache is enabled. */
  if (keycache->can_be_used)
  {
    /* Start the flush phase. */
    keycache->resize_in_flush= 1;

    if (flush_all_key_blocks(keycache))
    {
      /* TODO: if this happens, we should write a warning in the log file ! */
      keycache->resize_in_flush= 0;
      blocks= 0;
      keycache->can_be_used= 0;
      goto finish;
    }

    /* End the flush phase. */
    keycache->resize_in_flush= 0;
  }

  /*
    Some direct read/write operations (bypassing the cache) may still be
    unfinished. Wait until they are done, because re-initialization may
    change the block size and direct I/O is checked against the cache in
    block-size increments. This also applies when the cache was disabled:
    pending I/O must not run in parallel with normal cache operation.
  */
  while (keycache->cnt_for_resize_op)
    wait_on_queue(&keycache->waiting_for_resize_cnt, &keycache->cache_lock);

  /*
    Free old cache structures, allocate new structures, and initialize
    them. Note that the cache_lock mutex and the resize_queue are left
    untouched. We do not lose the cache_lock and will release it only at
    the end of this function.
  */
  end_key_cache(keycache, 0);			/* Don't free mutex */
  /* The following will work even if use_mem is 0 */
  blocks= init_key_cache(keycache, key_cache_block_size, use_mem,
                         division_limit, age_threshold);

finish:
  /*
    Mark the resize finished. This allows other threads to start a
    resize or to request new cache blocks.
  */
  keycache->in_resize= 0;

  /* Signal waiting threads. */
  release_whole_queue(&keycache->resize_queue);

  keycache_pthread_mutex_unlock(&keycache->cache_lock);
  return blocks;
}
539
540
541
/*
542
  Increment counter blocking resize key cache operation
543
*/
544
static inline void inc_counter_for_resize_op(KEY_CACHE *keycache)
{
  /* One more pending operation that a resize must wait for. */
  ++keycache->cnt_for_resize_op;
}
548
549
550
/*
551
  Decrement counter blocking resize key cache operation;
552
  Signal the operation to proceed when counter becomes equal zero
553
*/
554
static inline void dec_counter_for_resize_op(KEY_CACHE *keycache)
{
  /* Wake any waiting resizer once the last pending operation finishes. */
  keycache->cnt_for_resize_op--;
  if (keycache->cnt_for_resize_op == 0)
    release_whole_queue(&keycache->waiting_for_resize_cnt);
}
559
560
/*
561
  Change the key cache parameters
562
563
  SYNOPSIS
564
    change_key_cache_param()
565
    keycache			pointer to a key cache data structure
566
    division_limit		new division limit (if not zero)
567
    age_threshold		new age threshold (if not zero)
568
569
  RETURN VALUE
570
    none
571
572
  NOTES.
573
    Presently the function resets the key cache parameters
574
    concerning midpoint insertion strategy - division_limit and
575
    age_threshold.
576
*/
577
482 by Brian Aker
Remove uint.
578
void change_key_cache_param(KEY_CACHE *keycache, uint32_t division_limit,
                            uint32_t age_threshold)
{
  keycache_pthread_mutex_lock(&keycache->cache_lock);
  /* A zero argument means "leave the current setting unchanged". */
  if (division_limit)
    keycache->min_warm_blocks= (keycache->disk_blocks *
                                division_limit / 100 + 1);
  if (age_threshold)
    keycache->age_threshold= (keycache->disk_blocks *
                              age_threshold / 100);
  keycache_pthread_mutex_unlock(&keycache->cache_lock);
}
591
592
593
/*
594
  Remove key_cache from memory
595
596
  SYNOPSIS
597
    end_key_cache()
598
    keycache		key cache handle
599
    cleanup		Complete free (Free also mutex for key cache)
600
601
  RETURN VALUE
602
    none
603
*/
604
281 by Brian Aker
Converted myisam away from my_bool
605
void end_key_cache(KEY_CACHE *keycache, bool cleanup)
{
  if (!keycache->key_cache_inited)
    return;

  if (keycache->disk_blocks > 0)
  {
    if (keycache->block_mem)
    {
      free(keycache->block_mem);
      keycache->block_mem= NULL;
      free((unsigned char*) keycache->block_root);
      keycache->block_root= NULL;
    }
    keycache->disk_blocks= -1;
    /* Reset blocks_changed to be safe if flush_all_key_blocks is called */
    keycache->blocks_changed= 0;
  }

  if (cleanup)
  {
    /* Complete teardown: also destroy the mutex and mark uninitialized. */
    pthread_mutex_destroy(&keycache->cache_lock);
    keycache->key_cache_inited= 0;
    keycache->can_be_used= 0;
  }
} /* end_key_cache */
631
632
633
/*
634
  Link a thread into double-linked queue of waiting threads.
635
636
  SYNOPSIS
637
    link_into_queue()
638
      wqueue              pointer to the queue structure
639
      thread              pointer to the thread to be added to the queue
640
641
  RETURN VALUE
642
    none
643
644
  NOTES.
645
    Queue is represented by a circular list of the thread structures
646
    The list is double-linked of the type (**prev,*next), accessed by
647
    a pointer to the last element.
648
*/
649
650
static void link_into_queue(KEYCACHE_WQUEUE *wqueue,
                                   struct st_my_thread_var *thread)
{
  struct st_my_thread_var *tail;

  assert(!thread->next && !thread->prev);
  tail= wqueue->last_thread;
  if (tail == NULL)
  {
    /* Empty queue: the thread forms a one-element circular list. */
    thread->next= thread;
    thread->prev= &thread->next;
  }
  else
  {
    /* Splice the thread in just after the current tail of the ring. */
    thread->prev= tail->next->prev;
    tail->next->prev= &thread->next;
    thread->next= tail->next;
    tail->next= thread;
  }
  wqueue->last_thread= thread;
}
671
672
/*
673
  Unlink a thread from double-linked queue of waiting threads
674
675
  SYNOPSIS
676
    unlink_from_queue()
677
      wqueue              pointer to the queue structure
678
      thread              pointer to the thread to be removed from the queue
679
680
  RETURN VALUE
681
    none
682
683
  NOTES.
684
    See NOTES for link_into_queue
685
*/
686
687
static void unlink_from_queue(KEYCACHE_WQUEUE *wqueue,
                                     struct st_my_thread_var *thread)
{
  assert(thread->next && thread->prev);
  if (thread->next == thread)
  {
    /* The queue contains only this one member. */
    wqueue->last_thread= NULL;
  }
  else
  {
    /* Unhook from the circular list; repoint the tail if needed. */
    thread->next->prev= thread->prev;
    *thread->prev= thread->next;
    if (wqueue->last_thread == thread)
      wqueue->last_thread= STRUCT_PTR(struct st_my_thread_var, next,
                                      thread->prev);
  }
  /* Mark the thread as no longer queued. */
  thread->next= NULL;
  thread->prev= NULL;
}
705
706
707
/*
708
  Add a thread to single-linked queue of waiting threads
709
710
  SYNOPSIS
711
    wait_on_queue()
712
      wqueue            Pointer to the queue structure.
713
      mutex             Cache_lock to acquire after awake.
714
715
  RETURN VALUE
716
    none
717
718
  NOTES.
719
    Queue is represented by a circular list of the thread structures
720
    The list is single-linked of the type (*next), accessed by a pointer
721
    to the last element.
722
723
    The function protects against stray signals by verifying that the
724
    current thread is unlinked from the queue when awaking. However,
725
    since several threads can wait for the same event, it might be
726
    necessary for the caller of the function to check again if the
727
    condition for awake is indeed matched.
728
*/
729
730
static void wait_on_queue(KEYCACHE_WQUEUE *wqueue,
                          pthread_mutex_t *mutex)
{
  struct st_my_thread_var *last;
  struct st_my_thread_var *thread= my_thread_var;

  /* Add to queue. The thread must not be queued anywhere yet. */
  assert(!thread->next);
  assert(!thread->prev); /* Not required, but must be true anyway. */
  if (! (last= wqueue->last_thread))
    /* Empty queue: the thread forms a one-element ring. */
    thread->next= thread;
  else
  {
    /* Insert after the current tail, keeping the ring closed. */
    thread->next= last->next;
    last->next= thread;
  }
  wqueue->last_thread= thread;

  /*
    Wait until thread is removed from queue by the signalling thread.
    The loop protects against stray signals: the signaller clears
    thread->next (see release_whole_queue()), so a wakeup with
    thread->next still set means we were not really released.
  */
  do
  {
    /* mutex is released while waiting and re-acquired on wakeup. */
    keycache_pthread_cond_wait(&thread->suspend, mutex);
  }
  while (thread->next);
}
758
759
760
/*
761
  Remove all threads from queue signaling them to proceed
762
763
  SYNOPSIS
764
    release_whole_queue()
765
      wqueue            pointer to the queue structure
766
767
  RETURN VALUE
768
    none
769
770
  NOTES.
771
    See notes for wait_on_queue().
772
    When removed from the queue each thread is signaled via condition
773
    variable thread->suspend.
774
*/
775
776
static void release_whole_queue(KEYCACHE_WQUEUE *wqueue)
{
  struct st_my_thread_var *tail= wqueue->last_thread;
  struct st_my_thread_var *current;

  /* Nothing to do on an empty queue. */
  if (!tail)
    return;

  /* Walk the ring starting at the oldest waiter (tail->next). */
  current= tail->next;
  for (;;)
  {
    struct st_my_thread_var *following= current->next;
    /* Wake the waiter and detach it; a cleared next pointer tells the
       waiter in wait_on_queue() that the wakeup is genuine. */
    keycache_pthread_cond_signal(&current->suspend);
    current->next= NULL;
    if (current == tail)
      break;
    current= following;
  }

  /* Every waiter has been removed; the queue is now empty. */
  wqueue->last_thread= NULL;
}
801
802
803
/*
804
  Unlink a block from the chain of dirty/clean blocks
805
*/
806
static inline void unlink_changed(BLOCK_LINK *block)
{
  BLOCK_LINK *successor= block->next_changed;

  /* The block must currently be a member of some dirty/clean chain. */
  assert(block->prev_changed && *block->prev_changed == block);

  /* Bypass the block in its chain. */
  *block->prev_changed= successor;
  if (successor)
    successor->prev_changed= block->prev_changed;

  /* Mark the block as belonging to no chain. */
  block->next_changed= NULL;
  block->prev_changed= NULL;
}
815
816
817
/*
818
  Link a block into the chain of dirty/clean blocks
819
*/
820
821
static inline void link_changed(BLOCK_LINK *block, BLOCK_LINK **phead)
{
  BLOCK_LINK *old_head= *phead;

  /* The block must not be linked in any dirty/clean chain yet. */
  assert(!block->next_changed);
  assert(!block->prev_changed);

  /* Push the block onto the front of the chain headed by *phead. */
  block->next_changed= old_head;
  block->prev_changed= phead;
  if (old_head)
    old_head->prev_changed= &block->next_changed;
  *phead= block;
}
830
831
832
/*
833
  Link a block in a chain of clean blocks of a file.
834
835
  SYNOPSIS
836
    link_to_file_list()
837
      keycache		Key cache handle
838
      block             Block to relink
839
      file              File to be linked to
840
      unlink            If to unlink first
841
842
  DESCRIPTION
843
    Unlink a block from whichever chain it is linked in, if it's
844
    asked for, and link it to the chain of clean blocks of the
845
    specified file.
846
847
  NOTE
848
    Please do never set/clear BLOCK_CHANGED outside of
849
    link_to_file_list() or link_to_changed_list().
850
    You would risk to damage correct counting of changed blocks
851
    and to find blocks in the wrong hash.
852
853
  RETURN
854
    void
855
*/
856
857
static void link_to_file_list(KEY_CACHE *keycache,
858
                              BLOCK_LINK *block, int file,
281 by Brian Aker
Converted myisam away from my_bool
859
                              bool unlink_block)
1 by brian
clean slate
860
{
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
861
  assert(block->status & BLOCK_IN_USE);
862
  assert(block->hash_link && block->hash_link->block == block);
863
  assert(block->hash_link->file == file);
1 by brian
clean slate
864
  if (unlink_block)
865
    unlink_changed(block);
866
  link_changed(block, &keycache->file_blocks[FILE_HASH(file)]);
867
  if (block->status & BLOCK_CHANGED)
868
  {
869
    block->status&= ~BLOCK_CHANGED;
870
    keycache->blocks_changed--;
871
    keycache->global_blocks_changed--;
872
  }
873
}
874
875
876
/*
877
  Re-link a block from the clean chain to the dirty chain of a file.
878
879
  SYNOPSIS
880
    link_to_changed_list()
881
      keycache		key cache handle
882
      block             block to relink
883
884
  DESCRIPTION
885
    Unlink a block from the chain of clean blocks of a file
886
    and link it to the chain of dirty blocks of the same file.
887
888
  NOTE
889
    Please do never set/clear BLOCK_CHANGED outside of
890
    link_to_file_list() or link_to_changed_list().
891
    You would risk to damage correct counting of changed blocks
892
    and to find blocks in the wrong hash.
893
894
  RETURN
895
    void
896
*/
897
898
static void link_to_changed_list(KEY_CACHE *keycache,
899
                                 BLOCK_LINK *block)
900
{
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
901
  assert(block->status & BLOCK_IN_USE);
902
  assert(!(block->status & BLOCK_CHANGED));
903
  assert(block->hash_link && block->hash_link->block == block);
1 by brian
clean slate
904
905
  unlink_changed(block);
906
  link_changed(block,
907
               &keycache->changed_blocks[FILE_HASH(block->hash_link->file)]);
908
  block->status|=BLOCK_CHANGED;
909
  keycache->blocks_changed++;
910
  keycache->global_blocks_changed++;
911
}
912
913
914
/*
915
  Link a block to the LRU chain at the beginning or at the end of
916
  one of two parts.
917
918
  SYNOPSIS
919
    link_block()
920
      keycache            pointer to a key cache data structure
921
      block               pointer to the block to link to the LRU chain
922
      hot                 <-> to link the block into the hot subchain
923
      at_end              <-> to link the block at the end of the subchain
924
925
  RETURN VALUE
926
    none
927
928
  NOTES.
929
    The LRU ring is represented by a circular list of block structures.
930
    The list is double-linked of the type (**prev,*next) type.
931
    The LRU ring is divided into two parts - hot and warm.
932
    There are two pointers to access the last blocks of these two
933
    parts. The beginning of the warm part follows right after the
934
    end of the hot part.
935
    Only blocks of the warm part can be used for eviction.
936
    The first block from the beginning of this subchain is always
937
    taken for eviction (keycache->last_used->next)
938
939
    LRU chain:       +------+   H O T    +------+
940
                +----| end  |----...<----| beg  |----+
941
                |    +------+last        +------+    |
942
                v<-link in latest hot (new end)      |
943
                |     link in latest warm (new end)->^
944
                |    +------+  W A R M   +------+    |
945
                +----| beg  |---->...----| end  |----+
946
                     +------+            +------+ins
947
                  first for eviction
948
949
    It is also possible that the block is selected for eviction and thus
950
    not linked in the LRU ring.
951
*/
952
281 by Brian Aker
Converted myisam away from my_bool
953
static void link_block(KEY_CACHE *keycache, BLOCK_LINK *block, bool hot,
                       bool at_end)
{
  BLOCK_LINK *ins;
  BLOCK_LINK **pins;

  /* The block must be a valid, unrequested block outside the LRU ring. */
  assert((block->status & ~BLOCK_CHANGED) == (BLOCK_READ | BLOCK_IN_USE));
  assert(block->hash_link); /*backptr to block NULL from free_block()*/
  assert(!block->requests);
  assert(block->prev_changed && *block->prev_changed == block);
  assert(!block->next_used);
  assert(!block->prev_used);
  if (!hot && keycache->waiting_for_block.last_thread)
  {
    /* Signal that in the LRU warm sub-chain an available block has appeared */
    struct st_my_thread_var *last_thread=
                               keycache->waiting_for_block.last_thread;
    struct st_my_thread_var *first_thread= last_thread->next;
    struct st_my_thread_var *next_thread= first_thread;
    /* The page the oldest waiter is asking for; only waiters for this
       same page are woken in the loop below. */
    HASH_LINK *hash_link= (HASH_LINK *) first_thread->opt_info;
    struct st_my_thread_var *thread;
    do
    {
      thread= next_thread;
      next_thread= thread->next;
      /*
         We notify about the event all threads that ask
         for the same page as the first thread in the queue
      */
      if ((HASH_LINK *) thread->opt_info == hash_link)
      {
        keycache_pthread_cond_signal(&thread->suspend);
        unlink_from_queue(&keycache->waiting_for_block, thread);
        /* Each woken waiter holds a request on the block. */
        block->requests++;
      }
    }
    while (thread != last_thread);
    /* Hand the block over to the waiters' page. */
    hash_link->block= block;
    /*
      NOTE: We assigned the block to the hash_link and signalled the
      requesting thread(s). But it is possible that other threads runs
      first. These threads see the hash_link assigned to a block which
      is assigned to another hash_link and not marked BLOCK_IN_SWITCH.
      This can be a problem for functions that do not select the block
      via its hash_link: flush and free. They do only see a block which
      is in a "normal" state and don't know that it will be evicted soon.

      We cannot set BLOCK_IN_SWITCH here because only one of the
      requesting threads must handle the eviction. All others must wait
      for it to complete. If we set the flag here, the threads would not
      know who is in charge of the eviction. Without the flag, the first
      thread takes the stick and sets the flag.

      But we need to note in the block that it has been selected for
      eviction. It must not be freed. The evicting thread will not
      expect the block in the free list. Before freeing we could also
      check if block->requests > 1. But I think including another flag
      in the check of block->status is slightly more efficient and
      probably easier to read.
    */
    block->status|= BLOCK_IN_EVICTION;
    return;
  }
  /* Choose the insertion point: end of hot sub-chain or end of warm. */
  pins= hot ? &keycache->used_ins : &keycache->used_last;
  ins= *pins;
  if (ins)
  {
    /* Insert the block after *pins in the circular LRU list. */
    ins->next_used->prev_used= &block->next_used;
    block->next_used= ins->next_used;
    block->prev_used= &ins->next_used;
    ins->next_used= block;
    if (at_end)
      /* Make the block the new end of its sub-chain. */
      *pins= block;
  }
  else
  {
    /* The LRU ring is empty. Let the block point to itself. */
    keycache->used_last= keycache->used_ins= block->next_used= block;
    block->prev_used= &block->next_used;
  }
}
1034
1035
1036
/*
1037
  Unlink a block from the LRU chain
1038
1039
  SYNOPSIS
1040
    unlink_block()
1041
      keycache            pointer to a key cache data structure
1042
      block               pointer to the block to unlink from the LRU chain
1043
1044
  RETURN VALUE
1045
    none
1046
1047
  NOTES.
1048
    See NOTES for link_block
1049
*/
1050
1051
static void unlink_block(KEY_CACHE *keycache, BLOCK_LINK *block)
{
  /* Only a valid, unrequested block that is linked in the LRU ring
     may be unlinked. */
  assert((block->status & ~BLOCK_CHANGED) == (BLOCK_READ | BLOCK_IN_USE));
  assert(block->hash_link); /*backptr to block NULL from free_block()*/
  assert(!block->requests);
  assert(block->prev_changed && *block->prev_changed == block);
  assert(block->next_used && block->prev_used &&
              (block->next_used->prev_used == &block->next_used) &&
              (*block->prev_used == block));
  if (block->next_used == block)
    /* The list contains only one member */
    keycache->used_last= keycache->used_ins= NULL;
  else
  {
    /* Splice the block out of the circular list. */
    block->next_used->prev_used= block->prev_used;
    *block->prev_used= block->next_used;
    /* If the block was the end of a sub-chain, step the end marker back
       to the block's predecessor (recovered from the prev pointer via
       STRUCT_PTR). */
    if (keycache->used_last == block)
      keycache->used_last= STRUCT_PTR(BLOCK_LINK, next_used, block->prev_used);
    if (keycache->used_ins == block)
      keycache->used_ins=STRUCT_PTR(BLOCK_LINK, next_used, block->prev_used);
  }
  /* Mark the block as not being in the LRU ring. */
  block->next_used= NULL;
  block->prev_used= NULL;
}
1075
1076
1077
/*
1078
  Register requests for a block.
1079
1080
  SYNOPSIS
1081
    reg_requests()
1082
      keycache          Pointer to a key cache data structure.
1083
      block             Pointer to the block to register a request on.
1084
      count             Number of requests. Always 1.
1085
1086
  NOTE
1087
    The first request unlinks the block from the LRU ring. This means
1088
    that it is protected against eveiction.
1089
1090
  RETURN
1091
    void
1092
*/
1093
static void reg_requests(KEY_CACHE *keycache, BLOCK_LINK *block, int count)
{
  assert(block->status & BLOCK_IN_USE);
  assert(block->hash_link);

  /*
    The first request removes the block from the LRU ring, protecting
    it against eviction while it is in use.
  */
  if (block->requests == 0)
    unlink_block(keycache, block);
  block->requests= block->requests + count;
}
1102
1103
1104
/*
1105
  Unregister request for a block
1106
  linking it to the LRU chain if it's the last request
1107
1108
  SYNOPSIS
1109
    unreg_request()
1110
    keycache            pointer to a key cache data structure
1111
    block               pointer to the block to link to the LRU chain
1112
    at_end              <-> to link the block at the end of the LRU chain
1113
1114
  RETURN VALUE
1115
    none
1116
1117
  NOTES.
1118
    Every linking to the LRU ring decrements by one a special block
163 by Brian Aker
Merge Monty's code.
1119
    counter (if it's positive). If the at_end parameter is true the block is
1 by brian
clean slate
1120
    added either at the end of warm sub-chain or at the end of hot sub-chain.
1121
    It is added to the hot subchain if its counter is zero and number of
1122
    blocks in warm sub-chain is not less than some low limit (determined by
1123
    the division_limit parameter). Otherwise the block is added to the warm
163 by Brian Aker
Merge Monty's code.
1124
    sub-chain. If the at_end parameter is false the block is always added
1 by brian
clean slate
1125
    at beginning of the warm sub-chain.
1126
    Thus a warm block can be promoted to the hot sub-chain when its counter
1127
    becomes zero for the first time.
1128
    At the same time  the block at the very beginning of the hot subchain
1129
    might be moved to the beginning of the warm subchain if it stays untouched
1130
    for a too long time (this time is determined by parameter age_threshold).
1131
1132
    It is also possible that the block is selected for eviction and thus
1133
    not linked in the LRU ring.
1134
*/
1135
1136
static void unreg_request(KEY_CACHE *keycache,
                          BLOCK_LINK *block, int at_end)
{
  /* The block must be in use, requested, and not in the LRU ring. */
  assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
  assert(block->hash_link); /*backptr to block NULL from free_block()*/
  assert(block->requests);
  assert(block->prev_changed && *block->prev_changed == block);
  assert(!block->next_used);
  assert(!block->prev_used);
  if (! --block->requests)
  {
    /* Last request released: the block goes back into the LRU ring. */
    bool hot;
    if (block->hits_left)
      block->hits_left--;
    /*
      The block becomes hot when its hit counter is exhausted, linking
      at the end was requested, and the warm sub-chain is large enough
      (above min_warm_blocks) to afford losing a member.
    */
    hot= !block->hits_left && at_end &&
      keycache->warm_blocks > keycache->min_warm_blocks;
    if (hot)
    {
      if (block->temperature == BLOCK_WARM)
        keycache->warm_blocks--;
      block->temperature= BLOCK_HOT;
    }
    link_block(keycache, block, hot, (bool)at_end);
    /* Stamp the block with the current logical clock and advance it. */
    block->last_hit_time= keycache->keycache_time;
    keycache->keycache_time++;
    /*
      At this place, the block might be in the LRU ring or not. If an
      evicter was waiting for a block, it was selected for eviction and
      not linked in the LRU ring.
    */

    /*
      Check if we should link a hot block to the warm block sub-chain.
      It is possible that we select the same block as above. But it can
      also be another block. In any case a block from the LRU ring is
      selected. In other words it works even if the above block was
      selected for eviction and not linked in the LRU ring. Since this
      happens only if the LRU ring is empty, the block selected below
      would be NULL and the rest of the function skipped.
    */
    block= keycache->used_ins;
    if (block && keycache->keycache_time - block->last_hit_time >
	keycache->age_threshold)
    {
      /* The oldest hot block stayed untouched too long: demote it to
         the beginning of the warm sub-chain. */
      unlink_block(keycache, block);
      link_block(keycache, block, 0, 0);
      if (block->temperature != BLOCK_WARM)
      {
        keycache->warm_blocks++;
        block->temperature= BLOCK_WARM;
      }
    }
  }
}
1190
1191
/*
1192
  Remove a reader of the page in block
1193
*/
1194
1195
static void remove_reader(BLOCK_LINK *block)
{
  assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
  assert(block->hash_link && block->hash_link->block == block);
  assert(block->prev_changed && *block->prev_changed == block);
  assert(!block->next_used);
  assert(!block->prev_used);
  assert(block->hash_link->requests);

  /* Drop this reader's request on the page. */
  block->hash_link->requests--;

  /* If we were the last reader and someone is parked on the block's
     condition variable, wake them up. */
  if (block->hash_link->requests == 0 && block->condvar)
    keycache_pthread_cond_signal(block->condvar);
}
1206
1207
1208
/*
1209
  Wait until the last reader of the page in block
1210
  signals on its termination
1211
*/
1212
1213
static void wait_for_readers(KEY_CACHE *keycache,
                             BLOCK_LINK *block)
{
  struct st_my_thread_var *thread= my_thread_var;
  /* The block must be valid, clean and not being flushed. */
  assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
  assert(!(block->status & (BLOCK_ERROR | BLOCK_IN_FLUSH |
                                 BLOCK_CHANGED)));
  assert(block->hash_link);
  assert(block->hash_link->block == block);
  /* Linked in file_blocks or changed_blocks hash. */
  assert(block->prev_changed && *block->prev_changed == block);
  /* Not linked in LRU ring. */
  assert(!block->next_used);
  assert(!block->prev_used);
  /* Block until every registered reader of the page has called
     remove_reader(); the last one signals block->condvar. */
  while (block->hash_link->requests)
  {
    /* There must be no other waiter. We have no queue here. */
    assert(!block->condvar);
    /* Publish our condition variable so the last reader can wake us. */
    block->condvar= &thread->suspend;
    /* cache_lock is released during the wait and re-acquired after. */
    keycache_pthread_cond_wait(&thread->suspend, &keycache->cache_lock);
    block->condvar= NULL;
  }
}
1236
1237
1238
/*
1239
  Add a hash link to a bucket in the hash_table
1240
*/
1241
1242
static inline void link_hash(HASH_LINK **start, HASH_LINK *hash_link)
{
  HASH_LINK *old_first= *start;

  /* Push the link onto the front of the bucket chain headed by *start. */
  hash_link->next= old_first;
  hash_link->prev= start;
  if (old_first)
    old_first->prev= &hash_link->next;
  *start= hash_link;
}
1250
1251
1252
/*
1253
  Remove a hash link from the hash table
1254
*/
1255
1256
static void unlink_hash(KEY_CACHE *keycache, HASH_LINK *hash_link)
{
  /* Only an unrequested hash link may be removed. */
  assert(hash_link->requests == 0);
  /* Detach the link from its bucket chain. */
  if ((*hash_link->prev= hash_link->next))
    hash_link->next->prev= hash_link->prev;
  hash_link->block= NULL;
  if (keycache->waiting_for_hash_link.last_thread)
  {
    /* Signal that a free hash link has appeared */
    struct st_my_thread_var *last_thread=
                               keycache->waiting_for_hash_link.last_thread;
    struct st_my_thread_var *first_thread= last_thread->next;
    struct st_my_thread_var *next_thread= first_thread;
    /* The page the oldest waiter asked for (set in get_hash_link()). */
    KEYCACHE_PAGE *first_page= (KEYCACHE_PAGE *) (first_thread->opt_info);
    struct st_my_thread_var *thread;

    /* Reuse the freed link directly for the waiters' page. */
    hash_link->file= first_page->file;
    hash_link->diskpos= first_page->filepos;
    do
    {
      KEYCACHE_PAGE *page;
      thread= next_thread;
      page= (KEYCACHE_PAGE *) thread->opt_info;
      next_thread= thread->next;
      /*
         We notify about the event all threads that ask
         for the same page as the first thread in the queue
      */
      if (page->file == hash_link->file && page->filepos == hash_link->diskpos)
      {
        keycache_pthread_cond_signal(&thread->suspend);
        unlink_from_queue(&keycache->waiting_for_hash_link, thread);
      }
    }
    while (thread != last_thread);
    /* Insert the recycled link into the bucket for its new page. */
    link_hash(&keycache->hash_root[KEYCACHE_HASH(hash_link->file,
					         hash_link->diskpos)],
              hash_link);
    return;
  }
  /* No waiters: return the link to the free list. */
  hash_link->next= keycache->free_hash_list;
  keycache->free_hash_list= hash_link;
}
1299
1300
1301
/*
1302
  Get the hash link for a page
1303
*/
1304
1305
static HASH_LINK *get_hash_link(KEY_CACHE *keycache,
                                int file, my_off_t filepos)
{
  register HASH_LINK *hash_link, **start;

restart:
  /*
     Find the bucket in the hash table for the pair (file, filepos);
     start contains the head of the bucket list,
     hash_link points to the first member of the list
  */
  hash_link= *(start= &keycache->hash_root[KEYCACHE_HASH(file, filepos)]);
  /* Look for an element for the pair (file, filepos) in the bucket chain */
  while (hash_link &&
         (hash_link->diskpos != filepos || hash_link->file != file))
  {
    hash_link= hash_link->next;
  }
  if (! hash_link)
  {
    /* There is no hash link in the hash table for the pair (file, filepos) */
    if (keycache->free_hash_list)
    {
      /* Reuse a previously freed hash link. */
      hash_link= keycache->free_hash_list;
      keycache->free_hash_list= hash_link->next;
    }
    else if (keycache->hash_links_used < keycache->hash_links)
    {
      /* Take a never-used link from the preallocated pool. */
      hash_link= &keycache->hash_link_root[keycache->hash_links_used++];
    }
    else
    {
      /* Wait for a free hash link */
      struct st_my_thread_var *thread= my_thread_var;
      KEYCACHE_PAGE page;
      /* Record the requested page so unlink_hash() can match us. */
      page.file= file;
      page.filepos= filepos;
      thread->opt_info= (void *) &page;
      link_into_queue(&keycache->waiting_for_hash_link, thread);
      /* cache_lock is released while waiting; a freed link may have
         been assigned to our page directly, so re-scan the bucket. */
      keycache_pthread_cond_wait(&thread->suspend,
                                 &keycache->cache_lock);
      thread->opt_info= NULL;
      goto restart;
    }
    hash_link->file= file;
    hash_link->diskpos= filepos;
    link_hash(start, hash_link);
  }
  /* Register the request for the page */
  hash_link->requests++;

  return hash_link;
}
1358
1359
1360
/*
1361
  Get a block for the file page requested by a keycache read/write operation;
1362
  If the page is not in the cache return a free block, if there is none
1363
  return the lru block after saving its buffer if the page is dirty.
1364
1365
  SYNOPSIS
1366
1367
    find_key_block()
1368
      keycache            pointer to a key cache data structure
1369
      file                handler for the file to read page from
1370
      filepos             position of the page in the file
1371
      init_hits_left      how initialize the block counter for the page
1372
      wrmode              <-> get for writing
1373
      page_st        out  {PAGE_READ,PAGE_TO_BE_READ,PAGE_WAIT_TO_BE_READ}
1374
1375
  RETURN VALUE
1376
    Pointer to the found block if successful, 0 - otherwise
1377
1378
  NOTES.
1379
    For the page from file positioned at filepos the function checks whether
1380
    the page is in the key cache specified by the first parameter.
1381
    If this is the case it immediately returns the block.
1382
    If not, the function first chooses  a block for this page. If there is
1383
    no not used blocks in the key cache yet, the function takes the block
1384
    at the very beginning of the warm sub-chain. It saves the page in that
1385
    block if it's dirty before returning the pointer to it.
1386
    The function returns in the page_st parameter the following values:
1387
      PAGE_READ         - if page already in the block,
1388
      PAGE_TO_BE_READ   - if it is to be read yet by the current thread
1389
      WAIT_TO_BE_READ   - if it is to be read by another thread
1390
    If an error occurs THE BLOCK_ERROR bit is set in the block status.
1391
    It might happen that there are no blocks in LRU chain (in warm part) -
1392
    all blocks  are unlinked for some read/write operations. Then the function
1393
    waits until first of this operations links any block back.
1394
*/
1395
1396
static BLOCK_LINK *find_key_block(KEY_CACHE *keycache,
1397
                                  File file, my_off_t filepos,
1398
                                  int init_hits_left,
1399
                                  int wrmode, int *page_st)
1400
{
1401
  HASH_LINK *hash_link;
1402
  BLOCK_LINK *block;
1403
  int error= 0;
1404
  int page_status;
1405
1406
restart:
1407
  /*
1408
    If the flush phase of a resize operation fails, the cache is left
1409
    unusable. This will be detected only after "goto restart".
1410
  */
1411
  if (!keycache->can_be_used)
51.1.122 by Jay Pipes
Round 2 - removal custom KEYCACHE_DEBUG and THREAD_TRACE from keycache
1412
    return(0);
1 by brian
clean slate
1413
1414
  /*
1415
    Find the hash_link for the requested file block (file, filepos). We
1416
    do always get a hash_link here. It has registered our request so
1417
    that no other thread can use it for another file block until we
1418
    release the request (which is done by remove_reader() usually). The
1419
    hash_link can have a block assigned to it or not. If there is a
1420
    block, it may be assigned to this hash_link or not. In cases where a
1421
    block is evicted from the cache, it is taken from the LRU ring and
1422
    referenced by the new hash_link. But the block can still be assigned
1423
    to its old hash_link for some time if it needs to be flushed first,
1424
    or if there are other threads still reading it.
1425
1426
    Summary:
1427
      hash_link is always returned.
1428
      hash_link->block can be:
1429
      - NULL or
1430
      - not assigned to this hash_link or
1431
      - assigned to this hash_link. If assigned, the block can have
1432
        - invalid data (when freshly assigned) or
1433
        - valid data. Valid data can be
1434
          - changed over the file contents (dirty) or
1435
          - not changed (clean).
1436
  */
1437
  hash_link= get_hash_link(keycache, file, filepos);
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1438
  assert((hash_link->file == file) && (hash_link->diskpos == filepos));
1 by brian
clean slate
1439
1440
  page_status= -1;
1441
  if ((block= hash_link->block) &&
1442
      block->hash_link == hash_link && (block->status & BLOCK_READ))
1443
  {
1444
    /* Assigned block with valid (changed or unchanged) contents. */
1445
    page_status= PAGE_READ;
1446
  }
1447
  /*
1448
    else (page_status == -1)
1449
      - block == NULL or
1450
      - block not assigned to this hash_link or
1451
      - block assigned but not yet read from file (invalid data).
1452
  */
1453
1454
  if (keycache->in_resize)
1455
  {
1456
    /* This is a request during a resize operation */
1457
1458
    if (!block)
1459
    {
1460
      struct st_my_thread_var *thread;
1461
1462
      /*
1463
        The file block is not in the cache. We don't need it in the
1464
        cache: we are going to read or write directly to file. Cancel
1465
        the request. We can simply decrement hash_link->requests because
1466
        we did not release cache_lock since increasing it. So no other
1467
        thread can wait for our request to become released.
1468
      */
1469
      if (hash_link->requests == 1)
1470
      {
1471
        /*
1472
          We are the only one to request this hash_link (this file/pos).
1473
          Free the hash_link.
1474
        */
1475
        hash_link->requests--;
1476
        unlink_hash(keycache, hash_link);
51.1.122 by Jay Pipes
Round 2 - removal custom KEYCACHE_DEBUG and THREAD_TRACE from keycache
1477
        return(0);
1 by brian
clean slate
1478
      }
1479
1480
      /*
1481
        More requests on the hash_link. Someone tries to evict a block
1482
        for this hash_link (could have started before resizing started).
1483
        This means that the LRU ring is empty. Otherwise a block could
1484
        be assigned immediately. Behave like a thread that wants to
1485
        evict a block for this file/pos. Add to the queue of threads
1486
        waiting for a block. Wait until there is one assigned.
1487
1488
        Refresh the request on the hash-link so that it cannot be reused
1489
        for another file/pos.
1490
      */
1491
      thread= my_thread_var;
1492
      thread->opt_info= (void *) hash_link;
1493
      link_into_queue(&keycache->waiting_for_block, thread);
1494
      do
1495
      {
1496
        keycache_pthread_cond_wait(&thread->suspend,
1497
                                   &keycache->cache_lock);
1498
      } while (thread->next);
1499
      thread->opt_info= NULL;
1500
      /*
1501
        A block should now be assigned to the hash_link. But it may
1502
        still need to be evicted. Anyway, we should re-check the
1503
        situation. page_status must be set correctly.
1504
      */
1505
      hash_link->requests--;
1506
      goto restart;
1507
    } /* end of if (!block) */
1508
1509
    /*
1510
      There is a block for this file/pos in the cache. Register a
1511
      request on it. This unlinks it from the LRU ring (if it is there)
1512
      and hence protects it against eviction (if not already in
1513
      eviction). We need this for returning the block to the caller, for
1514
      calling remove_reader() (for debugging purposes), and for calling
1515
      free_block(). The only case where we don't need the request is if
1516
      the block is in eviction. In that case we have to unregister the
1517
      request later.
1518
    */
1519
    reg_requests(keycache, block, 1);
1520
1521
    if (page_status != PAGE_READ)
1522
    {
1523
      /*
1524
        - block not assigned to this hash_link or
1525
        - block assigned but not yet read from file (invalid data).
1526
1527
        This must be a block in eviction. It will be read soon. We need
1528
        to wait here until this happened. Otherwise the caller could
1529
        access a wrong block or a block which is in read. While waiting
1530
        we cannot lose hash_link nor block. We have registered a request
1531
        on the hash_link. Everything can happen to the block but changes
1532
        in the hash_link -> block relationship. In other words:
1533
        everything can happen to the block but free or another completed
1534
        eviction.
1535
1536
        Note that we bahave like a secondary requestor here. We just
1537
        cannot return with PAGE_WAIT_TO_BE_READ. This would work for
1538
        read requests and writes on dirty blocks that are not in flush
1539
        only. Waiting here on COND_FOR_REQUESTED works in all
1540
        situations.
1541
      */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1542
      assert(((block->hash_link != hash_link) &&
1 by brian
clean slate
1543
                   (block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH))) ||
1544
                  ((block->hash_link == hash_link) &&
1545
                   !(block->status & BLOCK_READ)));
1546
      wait_on_queue(&block->wqueue[COND_FOR_REQUESTED], &keycache->cache_lock);
1547
      /*
1548
        Here we can trust that the block has been assigned to this
1549
        hash_link (block->hash_link == hash_link) and read into the
1550
        buffer (BLOCK_READ). The worst things possible here are that the
1551
        block is in free (BLOCK_REASSIGNED). But the block is still
1552
        assigned to the hash_link. The freeing thread waits until we
1553
        release our request on the hash_link. The block must not be
1554
        again in eviction because we registered an request on it before
1555
        starting to wait.
1556
      */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1557
      assert(block->hash_link == hash_link);
1558
      assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
1559
      assert(!(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH)));
1 by brian
clean slate
1560
    }
1561
    /*
1562
      The block is in the cache. Assigned to the hash_link. Valid data.
1563
      Note that in case of page_st == PAGE_READ, the block can be marked
1564
      for eviction. In any case it can be marked for freeing.
1565
    */
1566
1567
    if (!wrmode)
1568
    {
1569
      /* A reader can just read the block. */
1570
      *page_st= PAGE_READ;
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1571
      assert((hash_link->file == file) &&
1 by brian
clean slate
1572
                  (hash_link->diskpos == filepos) &&
1573
                  (block->hash_link == hash_link));
51.1.122 by Jay Pipes
Round 2 - removal custom KEYCACHE_DEBUG and THREAD_TRACE from keycache
1574
      return(block);
1 by brian
clean slate
1575
    }
1576
1577
    /*
1578
      This is a writer. No two writers for the same block can exist.
1579
      This must be assured by locks outside of the key cache.
1580
    */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1581
    assert(!(block->status & BLOCK_FOR_UPDATE));
1 by brian
clean slate
1582
1583
    while (block->status & BLOCK_IN_FLUSH)
1584
    {
1585
      /*
1586
        Wait until the block is flushed to file. Do not release the
1587
        request on the hash_link yet to prevent that the block is freed
1588
        or reassigned while we wait. While we wait, several things can
1589
        happen to the block, including another flush. But the block
1590
        cannot be reassigned to another hash_link until we release our
1591
        request on it. But it can be marked BLOCK_REASSIGNED from free
1592
        or eviction, while they wait for us to release the hash_link.
1593
      */
1594
      wait_on_queue(&block->wqueue[COND_FOR_SAVED], &keycache->cache_lock);
1595
      /*
1596
        If the flush phase failed, the resize could have finished while
1597
        we waited here.
1598
      */
1599
      if (!keycache->in_resize)
1600
      {
1601
        remove_reader(block);
1602
        unreg_request(keycache, block, 1);
1603
        goto restart;
1604
      }
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1605
      assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
1606
      assert(!(block->status & BLOCK_FOR_UPDATE));
1607
      assert(block->hash_link == hash_link);
1 by brian
clean slate
1608
    }
1609
1610
    if (block->status & BLOCK_CHANGED)
1611
    {
1612
      /*
1613
        We want to write a block with changed contents. If the cache
1614
        block size is bigger than the callers block size (e.g. MyISAM),
1615
        the caller may replace part of the block only. Changes of the
1616
        other part of the block must be preserved. Since the block has
1617
        not yet been selected for flush, we can still add our changes.
1618
      */
1619
      *page_st= PAGE_READ;
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1620
      assert((hash_link->file == file) &&
1 by brian
clean slate
1621
                  (hash_link->diskpos == filepos) &&
1622
                  (block->hash_link == hash_link));
51.1.122 by Jay Pipes
Round 2 - removal custom KEYCACHE_DEBUG and THREAD_TRACE from keycache
1623
      return(block);
1 by brian
clean slate
1624
    }
1625
1626
    /*
1627
      This is a write request for a clean block. We do not want to have
1628
      new dirty blocks in the cache while resizing. We will free the
1629
      block and write directly to file. If the block is in eviction or
1630
      in free, we just let it go.
1631
1632
      Unregister from the hash_link. This must be done before freeing
1633
      the block. And it must be done if not freeing the block. Because
1634
      we could have waited above, we need to call remove_reader(). Other
1635
      threads could wait for us to release our request on the hash_link.
1636
    */
1637
    remove_reader(block);
1638
1639
    /* If the block is not in eviction and not in free, we can free it. */
1640
    if (!(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
1641
                           BLOCK_REASSIGNED)))
1642
    {
1643
      /*
1644
        Free block as we are going to write directly to file.
1645
        Although we have an exlusive lock for the updated key part,
1646
        the control can be yielded by the current thread as we might
1647
        have unfinished readers of other key parts in the block
1648
        buffer. Still we are guaranteed not to have any readers
1649
        of the key part we are writing into until the block is
1650
        removed from the cache as we set the BLOCK_REASSIGNED
1651
        flag (see the code below that handles reading requests).
1652
      */
1653
      free_block(keycache, block);
1654
    }
1655
    else
1656
    {
1657
      /*
1658
        The block will be evicted/freed soon. Don't touch it in any way.
1659
        Unregister the request that we registered above.
1660
      */
1661
      unreg_request(keycache, block, 1);
1662
1663
      /*
1664
        The block is still assigned to the hash_link (the file/pos that
1665
        we are going to write to). Wait until the eviction/free is
1666
        complete. Otherwise the direct write could complete before all
1667
        readers are done with the block. So they could read outdated
1668
        data.
1669
1670
        Since we released our request on the hash_link, it can be reused
1671
        for another file/pos. Hence we cannot just check for
1672
        block->hash_link == hash_link. As long as the resize is
1673
        proceeding the block cannot be reassigned to the same file/pos
1674
        again. So we can terminate the loop when the block is no longer
1675
        assigned to this file/pos.
1676
      */
1677
      do
1678
      {
1679
        wait_on_queue(&block->wqueue[COND_FOR_SAVED],
1680
                      &keycache->cache_lock);
1681
        /*
1682
          If the flush phase failed, the resize could have finished
1683
          while we waited here.
1684
        */
1685
        if (!keycache->in_resize)
1686
          goto restart;
1687
      } while (block->hash_link &&
1688
               (block->hash_link->file == file) &&
1689
               (block->hash_link->diskpos == filepos));
1690
    }
51.1.122 by Jay Pipes
Round 2 - removal custom KEYCACHE_DEBUG and THREAD_TRACE from keycache
1691
    return(0);
1 by brian
clean slate
1692
  }
1693
1694
  if (page_status == PAGE_READ &&
1695
      (block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
1696
                        BLOCK_REASSIGNED)))
1697
  {
1698
    /*
1699
      This is a request for a block to be removed from cache. The block
1700
      is assigned to this hash_link and contains valid data, but is
1701
      marked for eviction or to be freed. Possible reasons why it has
1702
      not yet been evicted/freed can be a flush before reassignment
1703
      (BLOCK_IN_SWITCH), readers of the block have not finished yet
1704
      (BLOCK_REASSIGNED), or the evicting thread did not yet awake after
1705
      the block has been selected for it (BLOCK_IN_EVICTION).
1706
1707
       Only reading requests can proceed until the old dirty page is flushed,
1708
       all others are to be suspended, then resubmitted
1709
    */
1710
    if (!wrmode && !(block->status & BLOCK_REASSIGNED))
1711
    {
1712
      /*
1713
        This is a read request and the block not yet reassigned. We can
1714
        register our request and proceed. This unlinks the block from
1715
        the LRU ring and protects it against eviction.
1716
      */
1717
      reg_requests(keycache, block, 1);
1718
    }
1719
    else
1720
    {
1721
      /*
1722
        Either this is a write request for a block that is in eviction
1723
        or in free. We must not use it any more. Instead we must evict
1724
        another block. But we cannot do this before the eviction/free is
1725
        done. Otherwise we would find the same hash_link + block again
1726
        and again.
1727
1728
        Or this is a read request for a block in eviction/free that does
1729
        not require a flush, but waits for readers to finish with the
1730
        block. We do not read this block to let the eviction/free happen
1731
        as soon as possible. Again we must wait so that we don't find
1732
        the same hash_link + block again and again.
1733
      */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1734
      assert(hash_link->requests);
1 by brian
clean slate
1735
      hash_link->requests--;
1736
      wait_on_queue(&block->wqueue[COND_FOR_SAVED], &keycache->cache_lock);
1737
      /*
1738
        The block is no longer assigned to this hash_link.
1739
        Get another one.
1740
      */
1741
      goto restart;
1742
    }
1743
  }
1744
  else
1745
  {
1746
    /*
1747
      This is a request for a new block or for a block not to be removed.
1748
      Either
1749
      - block == NULL or
1750
      - block not assigned to this hash_link or
1751
      - block assigned but not yet read from file,
1752
      or
1753
      - block assigned with valid (changed or unchanged) data and
1754
      - it will not be reassigned/freed.
1755
    */
1756
    if (! block)
1757
    {
1758
      /* No block is assigned to the hash_link yet. */
1759
      if (keycache->blocks_unused)
1760
      {
1761
        if (keycache->free_block_list)
1762
        {
1763
          /* There is a block in the free list. */
1764
          block= keycache->free_block_list;
1765
          keycache->free_block_list= block->next_used;
1766
          block->next_used= NULL;
1767
        }
1768
        else
1769
        {
1770
          /* There are some never used blocks, take first of them */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1771
          assert(keycache->blocks_used <
303 by Brian Aker
First pass in removing ulong from MyISAM
1772
                      (uint32_t) keycache->disk_blocks);
1 by brian
clean slate
1773
          block= &keycache->block_root[keycache->blocks_used];
1774
          block->buffer= ADD_TO_PTR(keycache->block_mem,
303 by Brian Aker
First pass in removing ulong from MyISAM
1775
                                    ((uint32_t) keycache->blocks_used*
1 by brian
clean slate
1776
                                     keycache->key_cache_block_size),
481 by Brian Aker
Remove all of uchar.
1777
                                    unsigned char*);
1 by brian
clean slate
1778
          keycache->blocks_used++;
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1779
          assert(!block->next_used);
1 by brian
clean slate
1780
        }
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1781
        assert(!block->prev_used);
1782
        assert(!block->next_changed);
1783
        assert(!block->prev_changed);
1784
        assert(!block->hash_link);
1785
        assert(!block->status);
1786
        assert(!block->requests);
1 by brian
clean slate
1787
        keycache->blocks_unused--;
1788
        block->status= BLOCK_IN_USE;
1789
        block->length= 0;
1790
        block->offset= keycache->key_cache_block_size;
1791
        block->requests= 1;
1792
        block->temperature= BLOCK_COLD;
1793
        block->hits_left= init_hits_left;
1794
        block->last_hit_time= 0;
1795
        block->hash_link= hash_link;
1796
        hash_link->block= block;
1797
        link_to_file_list(keycache, block, file, 0);
1798
        page_status= PAGE_TO_BE_READ;
1799
      }
1800
      else
1801
      {
1802
	/*
1803
          There are no free blocks and no never used blocks, use a block
1804
          from the LRU ring.
1805
        */
1806
1807
        if (! keycache->used_last)
1808
        {
1809
          /*
1810
            The LRU ring is empty. Wait until a new block is added to
1811
            it. Several threads might wait here for the same hash_link,
1812
            all of them must get the same block. While waiting for a
1813
            block, after a block is selected for this hash_link, other
1814
            threads can run first before this one awakes. During this
1815
            time interval other threads find this hash_link pointing to
1816
            the block, which is still assigned to another hash_link. In
1817
            this case the block is not marked BLOCK_IN_SWITCH yet, but
1818
            it is marked BLOCK_IN_EVICTION.
1819
          */
1820
1821
          struct st_my_thread_var *thread= my_thread_var;
1822
          thread->opt_info= (void *) hash_link;
1823
          link_into_queue(&keycache->waiting_for_block, thread);
1824
          do
1825
          {
1826
            keycache_pthread_cond_wait(&thread->suspend,
1827
                                       &keycache->cache_lock);
1828
          }
1829
          while (thread->next);
1830
          thread->opt_info= NULL;
1831
          /* Assert that block has a request registered. */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1832
          assert(hash_link->block->requests);
1 by brian
clean slate
1833
          /* Assert that block is not in LRU ring. */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1834
          assert(!hash_link->block->next_used);
1835
          assert(!hash_link->block->prev_used);
1 by brian
clean slate
1836
        }
1837
        /*
1838
          If we waited above, hash_link->block has been assigned by
1839
          link_block(). Otherwise it is still NULL. In the latter case
1840
          we need to grab a block from the LRU ring ourselves.
1841
        */
1842
        block= hash_link->block;
1843
        if (! block)
1844
        {
1845
          /* Select the last block from the LRU ring. */
1846
          block= keycache->used_last->next_used;
1847
          block->hits_left= init_hits_left;
1848
          block->last_hit_time= 0;
1849
          hash_link->block= block;
1850
          /*
1851
            Register a request on the block. This unlinks it from the
1852
            LRU ring and protects it against eviction.
1853
          */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1854
          assert(!block->requests);
1 by brian
clean slate
1855
          reg_requests(keycache, block,1);
1856
          /*
1857
            We do not need to set block->status|= BLOCK_IN_EVICTION here
1858
            because we will set block->status|= BLOCK_IN_SWITCH
1859
            immediately without releasing the lock in between. This does
1860
            also support debugging. When looking at the block, one can
1861
            see if the block has been selected by link_block() after the
1862
            LRU ring was empty, or if it was grabbed directly from the
1863
            LRU ring in this branch.
1864
          */
1865
        }
1866
1867
        /*
1868
          If we had to wait above, there is a small chance that another
1869
          thread grabbed this block for the same file block already. But
1870
          in most cases the first condition is true.
1871
        */
1872
        if (block->hash_link != hash_link &&
1873
	    ! (block->status & BLOCK_IN_SWITCH) )
1874
        {
1875
	  /* this is a primary request for a new page */
1876
          block->status|= BLOCK_IN_SWITCH;
1877
1878
          if (block->status & BLOCK_CHANGED)
1879
          {
1880
	    /* The block contains a dirty page - push it out of the cache */
1881
1882
            if (block->status & BLOCK_IN_FLUSH)
1883
            {
1884
              /*
1885
                The block is marked for flush. If we do not wait here,
1886
                it could happen that we write the block, reassign it to
1887
                another file block, then, before the new owner can read
1888
                the new file block, the flusher writes the cache block
1889
                (which still has the old contents) to the new file block!
1890
              */
1891
              wait_on_queue(&block->wqueue[COND_FOR_SAVED],
1892
                            &keycache->cache_lock);
1893
              /*
1894
                The block is marked BLOCK_IN_SWITCH. It should be left
1895
                alone except for reading. No free, no write.
1896
              */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1897
              assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
1898
              assert(!(block->status & (BLOCK_REASSIGNED |
1 by brian
clean slate
1899
                                             BLOCK_CHANGED |
1900
                                             BLOCK_FOR_UPDATE)));
1901
            }
1902
            else
1903
            {
1904
              block->status|= BLOCK_IN_FLUSH | BLOCK_IN_FLUSHWRITE;
1905
              /*
1906
                BLOCK_IN_EVICTION may be true or not. Other flags must
1907
                have a fixed value.
1908
              */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1909
              assert((block->status & ~BLOCK_IN_EVICTION) ==
1 by brian
clean slate
1910
                          (BLOCK_READ | BLOCK_IN_SWITCH |
1911
                           BLOCK_IN_FLUSH | BLOCK_IN_FLUSHWRITE |
1912
                           BLOCK_CHANGED | BLOCK_IN_USE));
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1913
              assert(block->hash_link);
1 by brian
clean slate
1914
1915
              keycache_pthread_mutex_unlock(&keycache->cache_lock);
1916
              /*
1917
                The call is thread safe because only the current
1918
                thread might change the block->hash_link value
1919
              */
32 by Brian Aker
More cleanup on pread()
1920
              error= (pwrite(block->hash_link->file,
1921
                             block->buffer+block->offset,
1922
                             block->length - block->offset,
1923
                             block->hash_link->diskpos+ block->offset) == 0);
1 by brian
clean slate
1924
              keycache_pthread_mutex_lock(&keycache->cache_lock);
1925
1926
              /* Block status must not have changed. */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1927
              assert((block->status & ~BLOCK_IN_EVICTION) ==
1 by brian
clean slate
1928
                          (BLOCK_READ | BLOCK_IN_SWITCH |
1929
                           BLOCK_IN_FLUSH | BLOCK_IN_FLUSHWRITE |
51.1.123 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1930
                           BLOCK_CHANGED | BLOCK_IN_USE));
1 by brian
clean slate
1931
              keycache->global_cache_write++;
1932
            }
1933
          }
1934
1935
          block->status|= BLOCK_REASSIGNED;
1936
          /*
1937
            The block comes from the LRU ring. It must have a hash_link
1938
            assigned.
1939
          */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1940
          assert(block->hash_link);
1 by brian
clean slate
1941
          if (block->hash_link)
1942
          {
1943
            /*
1944
              All pending requests for this page must be resubmitted.
1945
              This must be done before waiting for readers. They could
1946
              wait for the flush to complete. And we must also do it
1947
              after the wait. Flushers might try to free the block while
1948
              we wait. They would wait until the reassignment is
1949
              complete. Also the block status must reflect the correct
1950
              situation: The block is not changed nor in flush any more.
1951
              Note that we must not change the BLOCK_CHANGED flag
1952
              outside of link_to_file_list() so that it is always in the
1953
              correct queue and the *blocks_changed counters are
1954
              correct.
1955
            */
1956
            block->status&= ~(BLOCK_IN_FLUSH | BLOCK_IN_FLUSHWRITE);
1957
            link_to_file_list(keycache, block, block->hash_link->file, 1);
1958
            release_whole_queue(&block->wqueue[COND_FOR_SAVED]);
1959
            /*
1960
              The block is still assigned to its old hash_link.
1961
	      Wait until all pending read requests
1962
	      for this page are executed
1963
	      (we could have avoided this waiting, if we had read
1964
	      a page in the cache in a sweep, without yielding control)
1965
            */
1966
            wait_for_readers(keycache, block);
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1967
            assert(block->hash_link && block->hash_link->block == block &&
1 by brian
clean slate
1968
                        block->prev_changed);
1969
            /* The reader must not have been a writer. */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1970
            assert(!(block->status & BLOCK_CHANGED));
1 by brian
clean slate
1971
1972
            /* Wake flushers that might have found the block in between. */
1973
            release_whole_queue(&block->wqueue[COND_FOR_SAVED]);
1974
1975
            /* Remove the hash link for the old file block from the hash. */
1976
            unlink_hash(keycache, block->hash_link);
1977
1978
            /*
1979
              For sanity checks link_to_file_list() asserts that block
1980
              and hash_link refer to each other. Hence we need to assign
1981
              the hash_link first, but then we would not know if it was
1982
              linked before. Hence we would not know if to unlink it. So
163 by Brian Aker
Merge Monty's code.
1983
              unlink it here and call link_to_file_list(..., false).
1 by brian
clean slate
1984
            */
1985
            unlink_changed(block);
1986
          }
1987
          block->status= error ? BLOCK_ERROR : BLOCK_IN_USE ;
1988
          block->length= 0;
1989
          block->offset= keycache->key_cache_block_size;
1990
          block->hash_link= hash_link;
1991
          link_to_file_list(keycache, block, file, 0);
1992
          page_status= PAGE_TO_BE_READ;
1993
51.1.123 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
1994
          assert(block->hash_link->block == block);
1995
          assert(hash_link->block->hash_link == hash_link);
1 by brian
clean slate
1996
        }
1997
        else
1998
        {
1999
          /*
2000
            Either (block->hash_link == hash_link),
2001
	    or     (block->status & BLOCK_IN_SWITCH).
2002
2003
            This is for secondary requests for a new file block only.
2004
            Either it is already assigned to the new hash_link meanwhile
2005
            (if we had to wait due to empty LRU), or it is already in
2006
            eviction by another thread. Since this block has been
2007
            grabbed from the LRU ring and attached to this hash_link,
2008
            another thread cannot grab the same block from the LRU ring
2009
            anymore. If the block is in eviction already, it must become
2010
            attached to the same hash_link and as such destined for the
2011
            same file block.
2012
          */
2013
          page_status= (((block->hash_link == hash_link) &&
2014
                         (block->status & BLOCK_READ)) ?
2015
                        PAGE_READ : PAGE_WAIT_TO_BE_READ);
2016
        }
2017
      }
2018
    }
2019
    else
2020
    {
2021
      /*
2022
        Block is not NULL. This hash_link points to a block.
2023
        Either
2024
        - block not assigned to this hash_link (yet) or
2025
        - block assigned but not yet read from file,
2026
        or
2027
        - block assigned with valid (changed or unchanged) data and
2028
        - it will not be reassigned/freed.
2029
2030
        The first condition means hash_link points to a block in
2031
        eviction. This is not necessarily marked by BLOCK_IN_SWITCH yet.
2032
        But then it is marked BLOCK_IN_EVICTION. See the NOTE in
2033
        link_block(). In both cases it is destined for this hash_link
2034
        and its file block address. When this hash_link got its block
2035
        address, the block was removed from the LRU ring and cannot be
2036
        selected for eviction (for another hash_link) again.
2037
2038
        Register a request on the block. This is another protection
2039
        against eviction.
2040
      */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2041
      assert(((block->hash_link != hash_link) &&
1 by brian
clean slate
2042
                   (block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH))) ||
2043
                  ((block->hash_link == hash_link) &&
2044
                   !(block->status & BLOCK_READ)) ||
2045
                  ((block->status & BLOCK_READ) &&
2046
                   !(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH))));
2047
      reg_requests(keycache, block, 1);
2048
      page_status= (((block->hash_link == hash_link) &&
2049
                     (block->status & BLOCK_READ)) ?
2050
                    PAGE_READ : PAGE_WAIT_TO_BE_READ);
2051
    }
2052
  }
2053
51.1.123 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2054
  assert(page_status != -1);
1 by brian
clean slate
2055
  /* Same assert basically, but be very sure. */
51.1.123 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2056
  assert(block);
1 by brian
clean slate
2057
  /* Assert that block has a request and is not in LRU ring. */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2058
  assert(block->requests);
2059
  assert(!block->next_used);
2060
  assert(!block->prev_used);
1 by brian
clean slate
2061
  /* Assert that we return the correct block. */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2062
  assert((page_status == PAGE_WAIT_TO_BE_READ) ||
1 by brian
clean slate
2063
              ((block->hash_link->file == file) &&
2064
               (block->hash_link->diskpos == filepos)));
2065
  *page_st=page_status;
2066
51.1.122 by Jay Pipes
Round 2 - removal custom KEYCACHE_DEBUG and THREAD_TRACE from keycache
2067
  return(block);
1 by brian
clean slate
2068
}
2069
2070
2071
/*
  Read into a key cache block buffer from disk.

  SYNOPSIS

    read_block()
      keycache            pointer to a key cache data structure
      block               block to which buffer the data is to be read
      read_length         size of data to be read
      min_length          at least so much data must be read
      primary             <-> the current thread will read the data

  RETURN VALUE
    None

  NOTES.
    The function either reads a page of data from file into the block
    buffer, or waits until another thread reads it. Which page to read is
    determined by a block parameter - a reference to the hash link for
    this page.
    If an error occurs the BLOCK_ERROR bit is set in the block status.
    We do not report an error when the size of the successfully read
    portion is less than read_length, but not less than min_length.
*/

static void read_block(KEY_CACHE *keycache,
482 by Brian Aker
Remove uint.
2096
                       BLOCK_LINK *block, uint32_t read_length,
2097
                       uint32_t min_length, bool primary)
1 by brian
clean slate
2098
{
482 by Brian Aker
Remove uint.
2099
  uint32_t got_length;
1 by brian
clean slate
2100
2101
  /* On entry cache_lock is locked */
2102
2103
  if (primary)
2104
  {
2105
    /*
2106
      This code is executed only by threads that submitted primary
2107
      requests. Until block->status contains BLOCK_READ, all other
2108
      request for the block become secondary requests. For a primary
2109
      request the block must be properly initialized.
2110
    */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2111
    assert(((block->status & ~BLOCK_FOR_UPDATE) == BLOCK_IN_USE));
2112
    assert((block->length == 0));
2113
    assert((block->offset == keycache->key_cache_block_size));
2114
    assert((block->requests > 0));
1 by brian
clean slate
2115
2116
    keycache->global_cache_read++;
2117
    /* Page is not in buffer yet, is to be read from disk */
2118
    keycache_pthread_mutex_unlock(&keycache->cache_lock);
2119
    /*
2120
      Here other threads may step in and register as secondary readers.
2121
      They will register in block->wqueue[COND_FOR_REQUESTED].
2122
    */
32 by Brian Aker
More cleanup on pread()
2123
    got_length= pread(block->hash_link->file, block->buffer, read_length, block->hash_link->diskpos);
1 by brian
clean slate
2124
    keycache_pthread_mutex_lock(&keycache->cache_lock);
2125
    /*
2126
      The block can now have been marked for free (in case of
2127
      FLUSH_RELEASE). Otherwise the state must be unchanged.
2128
    */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2129
    assert(((block->status & ~(BLOCK_REASSIGNED |
51.1.123 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2130
                                    BLOCK_FOR_UPDATE)) == BLOCK_IN_USE));
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2131
    assert((block->length == 0));
2132
    assert((block->offset == keycache->key_cache_block_size));
2133
    assert((block->requests > 0));
1 by brian
clean slate
2134
2135
    if (got_length < min_length)
2136
      block->status|= BLOCK_ERROR;
2137
    else
2138
    {
2139
      block->status|= BLOCK_READ;
2140
      block->length= got_length;
2141
      /*
2142
        Do not set block->offset here. If this block is marked
2143
        BLOCK_CHANGED later, we want to flush only the modified part. So
2144
        only a writer may set block->offset down from
2145
        keycache->key_cache_block_size.
2146
      */
2147
    }
2148
    /* Signal that all pending requests for this page now can be processed */
2149
    release_whole_queue(&block->wqueue[COND_FOR_REQUESTED]);
2150
  }
2151
  else
2152
  {
2153
    /*
2154
      This code is executed only by threads that submitted secondary
2155
      requests. At this point it could happen that the cache block is
2156
      not yet assigned to the hash_link for the requested file block.
2157
      But at awake from the wait this should be the case. Unfortunately
2158
      we cannot assert this here because we do not know the hash_link
2159
      for the requested file block nor the file and position. So we have
2160
      to assert this in the caller.
2161
    */
2162
    wait_on_queue(&block->wqueue[COND_FOR_REQUESTED], &keycache->cache_lock);
2163
  }
2164
}
2165
2166
2167
/*
2168
  Read a block of data from a cached file into a buffer;
2169
2170
  SYNOPSIS
2171
2172
    key_cache_read()
2173
      keycache            pointer to a key cache data structure
2174
      file                handler for the file for the block of data to be read
2175
      filepos             position of the block of data in the file
2176
      level               determines the weight of the data
2177
      buff                buffer to where the data must be placed
2178
      length              length of the buffer
2179
      block_length        length of the block in the key cache buffer
2180
      return_buffer       return pointer to the key cache buffer with the data
2181
2182
  RETURN VALUE
2183
    Returns address from where the data is placed if sucessful, 0 - otherwise.
2184
2185
  NOTES.
2186
    The function ensures that a block of data of size length from file
2187
    positioned at filepos is in the buffers for some key cache blocks.
2188
    Then the function either copies the data into the buffer buff, or,
163 by Brian Aker
Merge Monty's code.
2189
    if return_buffer is true, it just returns the pointer to the key cache
1 by brian
clean slate
2190
    buffer with the data.
2191
    Filepos must be a multiple of 'block_length', but it doesn't
2192
    have to be a multiple of key_cache_block_size;
2193
*/
2194
481 by Brian Aker
Remove all of uchar.
2195
unsigned char *key_cache_read(KEY_CACHE *keycache,
1 by brian
clean slate
2196
                      File file, my_off_t filepos, int level,
482 by Brian Aker
Remove uint.
2197
                      unsigned char *buff, uint32_t length,
2198
                      uint32_t block_length __attribute__((unused)),
1 by brian
clean slate
2199
                      int return_buffer __attribute__((unused)))
2200
{
281 by Brian Aker
Converted myisam away from my_bool
2201
  bool locked_and_incremented= false;
1 by brian
clean slate
2202
  int error=0;
481 by Brian Aker
Remove all of uchar.
2203
  unsigned char *start= buff;
1 by brian
clean slate
2204
2205
  if (keycache->key_cache_inited)
2206
  {
2207
    /* Key cache is used */
2208
    register BLOCK_LINK *block;
482 by Brian Aker
Remove uint.
2209
    uint32_t read_length;
2210
    uint32_t offset;
2211
    uint32_t status;
1 by brian
clean slate
2212
    int page_st;
2213
2214
    /*
2215
      When the key cache is once initialized, we use the cache_lock to
2216
      reliably distinguish the cases of normal operation, resizing, and
2217
      disabled cache. We always increment and decrement
2218
      'cnt_for_resize_op' so that a resizer can wait for pending I/O.
2219
    */
2220
    keycache_pthread_mutex_lock(&keycache->cache_lock);
2221
    /*
2222
      Cache resizing has two phases: Flushing and re-initializing. In
2223
      the flush phase read requests are allowed to bypass the cache for
2224
      blocks not in the cache. find_key_block() returns NULL in this
2225
      case.
2226
2227
      After the flush phase new I/O requests must wait until the
2228
      re-initialization is done. The re-initialization can be done only
2229
      if no I/O request is in progress. The reason is that
2230
      key_cache_block_size can change. With enabled cache, I/O is done
2231
      in chunks of key_cache_block_size. Every chunk tries to use a
2232
      cache block first. If the block size changes in the middle, a
2233
      block could be missed and old data could be read.
2234
    */
2235
    while (keycache->in_resize && !keycache->resize_in_flush)
2236
      wait_on_queue(&keycache->resize_queue, &keycache->cache_lock);
2237
    /* Register the I/O for the next resize. */
2238
    inc_counter_for_resize_op(keycache);
163 by Brian Aker
Merge Monty's code.
2239
    locked_and_incremented= true;
1 by brian
clean slate
2240
    /* Requested data may not always be aligned to cache blocks. */
2241
    offset= (uint) (filepos % keycache->key_cache_block_size);
2242
    /* Read data in key_cache_block_size increments */
2243
    do
2244
    {
2245
      /* Cache could be disabled in a later iteration. */
2246
2247
      if (!keycache->can_be_used)
2248
	goto no_key_cache;
2249
      /* Start reading at the beginning of the cache block. */
2250
      filepos-= offset;
2251
      /* Do not read beyond the end of the cache block. */
2252
      read_length= length;
2253
      set_if_smaller(read_length, keycache->key_cache_block_size-offset);
51.1.123 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2254
      assert(read_length > 0);
1 by brian
clean slate
2255
2256
      /* Request the cache block that matches file/pos. */
2257
      keycache->global_cache_r_requests++;
2258
      block=find_key_block(keycache, file, filepos, level, 0, &page_st);
2259
      if (!block)
2260
      {
2261
        /*
2262
          This happens only for requests submitted during key cache
2263
          resize. The block is not in the cache and shall not go in.
2264
          Read directly from file.
2265
        */
2266
        keycache->global_cache_read++;
2267
        keycache_pthread_mutex_unlock(&keycache->cache_lock);
481 by Brian Aker
Remove all of uchar.
2268
        error= (pread(file, (unsigned char*) buff, read_length, filepos + offset) == 0);
1 by brian
clean slate
2269
        keycache_pthread_mutex_lock(&keycache->cache_lock);
2270
        goto next_block;
2271
      }
2272
      if (!(block->status & BLOCK_ERROR))
2273
      {
2274
        if (page_st != PAGE_READ)
2275
        {
2276
          /* The requested page is to be read into the block buffer */
2277
          read_block(keycache, block,
2278
                     keycache->key_cache_block_size, read_length+offset,
281 by Brian Aker
Converted myisam away from my_bool
2279
                     (bool)(page_st == PAGE_TO_BE_READ));
1 by brian
clean slate
2280
          /*
2281
            A secondary request must now have the block assigned to the
2282
            requested file block. It does not hurt to check it for
2283
            primary requests too.
2284
          */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2285
          assert(keycache->can_be_used);
2286
          assert(block->hash_link->file == file);
2287
          assert(block->hash_link->diskpos == filepos);
2288
          assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
1 by brian
clean slate
2289
        }
2290
        else if (block->length < read_length + offset)
2291
        {
2292
          /*
2293
            Impossible if nothing goes wrong:
2294
            this could only happen if we are using a file with
2295
            small key blocks and are trying to read outside the file
2296
          */
2297
          my_errno= -1;
2298
          block->status|= BLOCK_ERROR;
2299
        }
2300
      }
2301
2302
      /* block status may have added BLOCK_ERROR in the above 'if'. */
2303
      if (!((status= block->status) & BLOCK_ERROR))
2304
      {
2305
        {
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2306
          assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
1 by brian
clean slate
2307
#if !defined(SERIALIZED_READ_FROM_CACHE)
2308
          keycache_pthread_mutex_unlock(&keycache->cache_lock);
2309
#endif
2310
2311
          /* Copy data from the cache buffer */
266.1.22 by Monty Taylor
Removed bmove512.
2312
          memcpy(buff, block->buffer+offset, (size_t) read_length);
1 by brian
clean slate
2313
2314
#if !defined(SERIALIZED_READ_FROM_CACHE)
2315
          keycache_pthread_mutex_lock(&keycache->cache_lock);
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2316
          assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
1 by brian
clean slate
2317
#endif
2318
        }
2319
      }
2320
2321
      remove_reader(block);
2322
2323
      /*
2324
         Link the block into the LRU ring if it's the last submitted
2325
         request for the block. This enables eviction for the block.
2326
           */
2327
      unreg_request(keycache, block, 1);
2328
2329
      if (status & BLOCK_ERROR)
2330
      {
2331
        error= 1;
2332
        break;
2333
      }
2334
2335
  next_block:
2336
      buff+= read_length;
2337
      filepos+= read_length+offset;
2338
      offset= 0;
2339
2340
    } while ((length-= read_length));
2341
    goto end;
2342
  }
2343
2344
no_key_cache:
2345
  /* Key cache is not used */
2346
2347
  keycache->global_cache_r_requests++;
2348
  keycache->global_cache_read++;
2349
2350
  if (locked_and_incremented)
2351
    keycache_pthread_mutex_unlock(&keycache->cache_lock);
481 by Brian Aker
Remove all of uchar.
2352
  if (pread(file, (unsigned char*) buff, length, filepos))
1 by brian
clean slate
2353
    error= 1;
2354
  if (locked_and_incremented)
2355
    keycache_pthread_mutex_lock(&keycache->cache_lock);
2356
2357
end:
2358
  if (locked_and_incremented)
2359
  {
2360
    dec_counter_for_resize_op(keycache);
2361
    keycache_pthread_mutex_unlock(&keycache->cache_lock);
2362
  }
481 by Brian Aker
Remove all of uchar.
2363
  return(error ? (unsigned char*) 0 : start);
1 by brian
clean slate
2364
}
2365
2366
2367
/*
2368
  Insert a block of file data from a buffer into key cache
2369
2370
  SYNOPSIS
2371
    key_cache_insert()
2372
    keycache            pointer to a key cache data structure
2373
    file                handler for the file to insert data from
2374
    filepos             position of the block of data in the file to insert
2375
    level               determines the weight of the data
2376
    buff                buffer to read data from
2377
    length              length of the data in the buffer
2378
2379
  NOTES
2380
    This is used by MyISAM to move all blocks from a index file to the key
2381
    cache
2382
2383
  RETURN VALUE
2384
    0 if a success, 1 - otherwise.
2385
*/
2386
2387
int key_cache_insert(KEY_CACHE *keycache,
2388
                     File file, my_off_t filepos, int level,
482 by Brian Aker
Remove uint.
2389
                     unsigned char *buff, uint32_t length)
1 by brian
clean slate
2390
{
2391
  int error= 0;
2392
2393
  if (keycache->key_cache_inited)
2394
  {
2395
    /* Key cache is used */
2396
    register BLOCK_LINK *block;
482 by Brian Aker
Remove uint.
2397
    uint32_t read_length;
2398
    uint32_t offset;
1 by brian
clean slate
2399
    int page_st;
281 by Brian Aker
Converted myisam away from my_bool
2400
    bool locked_and_incremented= false;
1 by brian
clean slate
2401
2402
    /*
2403
      When the keycache is once initialized, we use the cache_lock to
2404
      reliably distinguish the cases of normal operation, resizing, and
2405
      disabled cache. We always increment and decrement
2406
      'cnt_for_resize_op' so that a resizer can wait for pending I/O.
2407
    */
2408
    keycache_pthread_mutex_lock(&keycache->cache_lock);
2409
    /*
2410
      We do not load index data into a disabled cache nor into an
2411
      ongoing resize.
2412
    */
2413
    if (!keycache->can_be_used || keycache->in_resize)
2414
	goto no_key_cache;
2415
    /* Register the pseudo I/O for the next resize. */
2416
    inc_counter_for_resize_op(keycache);
163 by Brian Aker
Merge Monty's code.
2417
    locked_and_incremented= true;
1 by brian
clean slate
2418
    /* Loaded data may not always be aligned to cache blocks. */
2419
    offset= (uint) (filepos % keycache->key_cache_block_size);
2420
    /* Load data in key_cache_block_size increments. */
2421
    do
2422
    {
2423
      /* Cache could be disabled or resizing in a later iteration. */
2424
      if (!keycache->can_be_used || keycache->in_resize)
2425
	goto no_key_cache;
2426
      /* Start loading at the beginning of the cache block. */
2427
      filepos-= offset;
2428
      /* Do not load beyond the end of the cache block. */
2429
      read_length= length;
2430
      set_if_smaller(read_length, keycache->key_cache_block_size-offset);
51.1.123 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2431
      assert(read_length > 0);
1 by brian
clean slate
2432
2433
      /* The block has been read by the caller already. */
2434
      keycache->global_cache_read++;
2435
      /* Request the cache block that matches file/pos. */
2436
      keycache->global_cache_r_requests++;
2437
      block= find_key_block(keycache, file, filepos, level, 0, &page_st);
2438
      if (!block)
2439
      {
2440
        /*
2441
          This happens only for requests submitted during key cache
2442
          resize. The block is not in the cache and shall not go in.
2443
          Stop loading index data.
2444
        */
2445
        goto no_key_cache;
2446
      }
2447
      if (!(block->status & BLOCK_ERROR))
2448
      {
2449
        if ((page_st == PAGE_WAIT_TO_BE_READ) ||
2450
            ((page_st == PAGE_TO_BE_READ) &&
2451
             (offset || (read_length < keycache->key_cache_block_size))))
2452
        {
2453
          /*
2454
            Either
2455
2456
            this is a secondary request for a block to be read into the
2457
            cache. The block is in eviction. It is not yet assigned to
2458
            the requested file block (It does not point to the right
2459
            hash_link). So we cannot call remove_reader() on the block.
2460
            And we cannot access the hash_link directly here. We need to
2461
            wait until the assignment is complete. read_block() executes
163 by Brian Aker
Merge Monty's code.
2462
            the correct wait when called with primary == false.
1 by brian
clean slate
2463
2464
            Or
2465
2466
            this is a primary request for a block to be read into the
2467
            cache and the supplied data does not fill the whole block.
2468
2469
            This function is called on behalf of a LOAD INDEX INTO CACHE
2470
            statement, which is a read-only task and allows other
2471
            readers. It is possible that a parallel running reader tries
2472
            to access this block. If it needs more data than has been
2473
            supplied here, it would report an error. To be sure that we
2474
            have all data in the block that is available in the file, we
2475
            read the block ourselves.
2476
2477
            Though reading again what the caller did read already is an
2478
            expensive operation, we need to do this for correctness.
2479
          */
2480
          read_block(keycache, block, keycache->key_cache_block_size,
2481
                     read_length + offset, (page_st == PAGE_TO_BE_READ));
2482
          /*
2483
            A secondary request must now have the block assigned to the
2484
            requested file block. It does not hurt to check it for
2485
            primary requests too.
2486
          */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2487
          assert(keycache->can_be_used);
2488
          assert(block->hash_link->file == file);
2489
          assert(block->hash_link->diskpos == filepos);
2490
          assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
1 by brian
clean slate
2491
        }
2492
        else if (page_st == PAGE_TO_BE_READ)
2493
        {
2494
          /*
2495
            This is a new block in the cache. If we come here, we have
2496
            data for the whole block.
2497
          */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2498
          assert(block->hash_link->requests);
2499
          assert(block->status & BLOCK_IN_USE);
2500
          assert((page_st == PAGE_TO_BE_READ) ||
1 by brian
clean slate
2501
                      (block->status & BLOCK_READ));
2502
2503
#if !defined(SERIALIZED_READ_FROM_CACHE)
2504
          keycache_pthread_mutex_unlock(&keycache->cache_lock);
2505
          /*
2506
            Here other threads may step in and register as secondary readers.
2507
            They will register in block->wqueue[COND_FOR_REQUESTED].
2508
          */
2509
#endif
2510
2511
          /* Copy data from buff */
266.1.22 by Monty Taylor
Removed bmove512.
2512
          memcpy(block->buffer+offset, buff, (size_t) read_length);
1 by brian
clean slate
2513
2514
#if !defined(SERIALIZED_READ_FROM_CACHE)
2515
          keycache_pthread_mutex_lock(&keycache->cache_lock);
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2516
          assert(block->status & BLOCK_IN_USE);
2517
          assert((page_st == PAGE_TO_BE_READ) ||
1 by brian
clean slate
2518
                      (block->status & BLOCK_READ));
2519
#endif
2520
          /*
2521
            After the data is in the buffer, we can declare the block
2522
            valid. Now other threads do not need to register as
2523
            secondary readers any more. They can immediately access the
2524
            block.
2525
          */
2526
          block->status|= BLOCK_READ;
2527
          block->length= read_length+offset;
2528
          /*
2529
            Do not set block->offset here. If this block is marked
2530
            BLOCK_CHANGED later, we want to flush only the modified part. So
2531
            only a writer may set block->offset down from
2532
            keycache->key_cache_block_size.
2533
          */
2534
          /* Signal all pending requests. */
2535
          release_whole_queue(&block->wqueue[COND_FOR_REQUESTED]);
2536
        }
2537
        else
2538
        {
2539
          /*
2540
            page_st == PAGE_READ. The block is in the buffer. All data
2541
            must already be present. Blocks are always read with all
2542
            data available on file. Assert that the block does not have
2543
            less contents than the preloader supplies. If the caller has
2544
            data beyond block->length, it means that a file write has
2545
            been done while this block was in cache and not extended
2546
            with the new data. If the condition is met, we can simply
2547
            ignore the block.
2548
          */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2549
          assert((page_st == PAGE_READ) &&
1 by brian
clean slate
2550
                      (read_length + offset <= block->length));
2551
        }
2552
2553
        /*
2554
          A secondary request must now have the block assigned to the
2555
          requested file block. It does not hurt to check it for primary
2556
          requests too.
2557
        */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2558
        assert(block->hash_link->file == file);
2559
        assert(block->hash_link->diskpos == filepos);
2560
        assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
1 by brian
clean slate
2561
      } /* end of if (!(block->status & BLOCK_ERROR)) */
2562
2563
2564
      remove_reader(block);
2565
2566
      /*
2567
         Link the block into the LRU ring if it's the last submitted
2568
         request for the block. This enables eviction for the block.
2569
      */
2570
      unreg_request(keycache, block, 1);
2571
2572
      error= (block->status & BLOCK_ERROR);
2573
2574
      if (error)
2575
        break;
2576
2577
      buff+= read_length;
2578
      filepos+= read_length+offset;
2579
      offset= 0;
2580
2581
    } while ((length-= read_length));
2582
2583
  no_key_cache:
2584
    if (locked_and_incremented)
2585
      dec_counter_for_resize_op(keycache);
2586
    keycache_pthread_mutex_unlock(&keycache->cache_lock);
2587
  }
51.1.122 by Jay Pipes
Round 2 - removal custom KEYCACHE_DEBUG and THREAD_TRACE from keycache
2588
  return(error);
1 by brian
clean slate
2589
}
2590
2591
2592
/*
2593
  Write a buffer into a cached file.
2594
2595
  SYNOPSIS
2596
2597
    key_cache_write()
2598
      keycache            pointer to a key cache data structure
2599
      file                handler for the file to write data to
2600
      filepos             position in the file to write data to
2601
      level               determines the weight of the data
2602
      buff                buffer with the data
2603
      length              length of the buffer
2604
      dont_write          if is 0 then all dirty pages involved in writing
2605
                          should have been flushed from key cache
2606
2607
  RETURN VALUE
2608
    0 if a success, 1 - otherwise.
2609
2610
  NOTES.
2611
    The function copies the data of size length from buff into buffers
2612
    for key cache blocks that are  assigned to contain the portion of
2613
    the file starting with position filepos.
163 by Brian Aker
Merge Monty's code.
2614
    It ensures that this data is flushed to the file if dont_write is false.
1 by brian
clean slate
2615
    Filepos must be a multiple of 'block_length', but it doesn't
2616
    have to be a multiple of key_cache_block_size;
2617
163 by Brian Aker
Merge Monty's code.
2618
    dont_write is always true in the server (info->lock_type is never F_UNLCK).
1 by brian
clean slate
2619
*/
2620
2621
int key_cache_write(KEY_CACHE *keycache,
2622
                    File file, my_off_t filepos, int level,
482 by Brian Aker
Remove uint.
2623
                    unsigned char *buff, uint32_t length,
2624
                    uint32_t block_length  __attribute__((unused)),
1 by brian
clean slate
2625
                    int dont_write)
2626
{
281 by Brian Aker
Converted myisam away from my_bool
2627
  bool locked_and_incremented= false;
1 by brian
clean slate
2628
  int error=0;
2629
2630
  if (!dont_write)
2631
  {
2632
    /* purecov: begin inspected */
2633
    /* Not used in the server. */
2634
    /* Force writing from buff into disk. */
2635
    keycache->global_cache_w_requests++;
2636
    keycache->global_cache_write++;
32 by Brian Aker
More cleanup on pread()
2637
    if (pwrite(file, buff, length, filepos) == 0)
51.1.122 by Jay Pipes
Round 2 - removal custom KEYCACHE_DEBUG and THREAD_TRACE from keycache
2638
      return(1);
1 by brian
clean slate
2639
    /* purecov: end */
2640
  }
2641
2642
  if (keycache->key_cache_inited)
2643
  {
2644
    /* Key cache is used */
2645
    register BLOCK_LINK *block;
482 by Brian Aker
Remove uint.
2646
    uint32_t read_length;
2647
    uint32_t offset;
1 by brian
clean slate
2648
    int page_st;
2649
2650
    /*
2651
      When the key cache is once initialized, we use the cache_lock to
2652
      reliably distinguish the cases of normal operation, resizing, and
2653
      disabled cache. We always increment and decrement
2654
      'cnt_for_resize_op' so that a resizer can wait for pending I/O.
2655
    */
2656
    keycache_pthread_mutex_lock(&keycache->cache_lock);
2657
    /*
2658
      Cache resizing has two phases: Flushing and re-initializing. In
2659
      the flush phase write requests can modify dirty blocks that are
2660
      not yet in flush. Otherwise they are allowed to bypass the cache.
2661
      find_key_block() returns NULL in both cases (clean blocks and
2662
      non-cached blocks).
2663
2664
      After the flush phase new I/O requests must wait until the
2665
      re-initialization is done. The re-initialization can be done only
2666
      if no I/O request is in progress. The reason is that
2667
      key_cache_block_size can change. With enabled cache I/O is done in
2668
      chunks of key_cache_block_size. Every chunk tries to use a cache
2669
      block first. If the block size changes in the middle, a block
2670
      could be missed and data could be written below a cached block.
2671
    */
2672
    while (keycache->in_resize && !keycache->resize_in_flush)
2673
      wait_on_queue(&keycache->resize_queue, &keycache->cache_lock);
2674
    /* Register the I/O for the next resize. */
2675
    inc_counter_for_resize_op(keycache);
163 by Brian Aker
Merge Monty's code.
2676
    locked_and_incremented= true;
1 by brian
clean slate
2677
    /* Requested data may not always be aligned to cache blocks. */
2678
    offset= (uint) (filepos % keycache->key_cache_block_size);
2679
    /* Write data in key_cache_block_size increments. */
2680
    do
2681
    {
2682
      /* Cache could be disabled in a later iteration. */
2683
      if (!keycache->can_be_used)
2684
	goto no_key_cache;
2685
      /* Start writing at the beginning of the cache block. */
2686
      filepos-= offset;
2687
      /* Do not write beyond the end of the cache block. */
2688
      read_length= length;
2689
      set_if_smaller(read_length, keycache->key_cache_block_size-offset);
51.1.123 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2690
      assert(read_length > 0);
1 by brian
clean slate
2691
2692
      /* Request the cache block that matches file/pos. */
2693
      keycache->global_cache_w_requests++;
2694
      block= find_key_block(keycache, file, filepos, level, 1, &page_st);
2695
      if (!block)
2696
      {
2697
        /*
2698
          This happens only for requests submitted during key cache
2699
          resize. The block is not in the cache and shall not go in.
2700
          Write directly to file.
2701
        */
2702
        if (dont_write)
2703
        {
2704
          /* Used in the server. */
2705
          keycache->global_cache_write++;
2706
          keycache_pthread_mutex_unlock(&keycache->cache_lock);
481 by Brian Aker
Remove all of uchar.
2707
          if (pwrite(file, (unsigned char*) buff, read_length, filepos + offset) == 0)
1 by brian
clean slate
2708
            error=1;
2709
          keycache_pthread_mutex_lock(&keycache->cache_lock);
2710
        }
2711
        goto next_block;
2712
      }
2713
      /*
2714
        Prevent block from flushing and from being selected for to be
2715
        freed. This must be set when we release the cache_lock.
2716
        However, we must not set the status of the block before it is
2717
        assigned to this file/pos.
2718
      */
2719
      if (page_st != PAGE_WAIT_TO_BE_READ)
2720
        block->status|= BLOCK_FOR_UPDATE;
2721
      /*
2722
        We must read the file block first if it is not yet in the cache
2723
        and we do not replace all of its contents.
2724
2725
        In cases where the cache block is big enough to contain (parts
2726
        of) index blocks of different indexes, our request can be
2727
        secondary (PAGE_WAIT_TO_BE_READ). In this case another thread is
2728
        reading the file block. If the read completes after us, it
2729
        overwrites our new contents with the old contents. So we have to
2730
        wait for the other thread to complete the read of this block.
2731
        read_block() takes care for the wait.
2732
      */
2733
      if (!(block->status & BLOCK_ERROR) &&
2734
          ((page_st == PAGE_TO_BE_READ &&
2735
            (offset || read_length < keycache->key_cache_block_size)) ||
2736
           (page_st == PAGE_WAIT_TO_BE_READ)))
2737
      {
2738
        read_block(keycache, block,
2739
                   offset + read_length >= keycache->key_cache_block_size?
2740
                   offset : keycache->key_cache_block_size,
2741
                   offset, (page_st == PAGE_TO_BE_READ));
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2742
        assert(keycache->can_be_used);
2743
        assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
1 by brian
clean slate
2744
        /*
2745
          Prevent block from flushing and from being selected for to be
2746
          freed. This must be set when we release the cache_lock.
2747
          Here we set it in case we could not set it above.
2748
        */
2749
        block->status|= BLOCK_FOR_UPDATE;
2750
      }
2751
      /*
2752
        The block should always be assigned to the requested file block
2753
        here. It need not be BLOCK_READ when overwriting the whole block.
2754
      */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2755
      assert(block->hash_link->file == file);
2756
      assert(block->hash_link->diskpos == filepos);
2757
      assert(block->status & BLOCK_IN_USE);
2758
      assert((page_st == PAGE_TO_BE_READ) || (block->status & BLOCK_READ));
1 by brian
clean slate
2759
      /*
2760
        The block to be written must not be marked BLOCK_REASSIGNED.
2761
        Otherwise it could be freed in dirty state or reused without
2762
        another flush during eviction. It must also not be in flush.
2763
        Otherwise the old contens may have been flushed already and
2764
        the flusher could clear BLOCK_CHANGED without flushing the
2765
        new changes again.
2766
      */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2767
      assert(!(block->status & BLOCK_REASSIGNED));
1 by brian
clean slate
2768
2769
      while (block->status & BLOCK_IN_FLUSHWRITE)
2770
      {
2771
        /*
2772
          Another thread is flushing the block. It was dirty already.
2773
          Wait until the block is flushed to file. Otherwise we could
2774
          modify the buffer contents just while it is written to file.
2775
          An unpredictable file block contents would be the result.
2776
          While we wait, several things can happen to the block,
2777
          including another flush. But the block cannot be reassigned to
2778
          another hash_link until we release our request on it.
2779
        */
2780
        wait_on_queue(&block->wqueue[COND_FOR_SAVED], &keycache->cache_lock);
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2781
        assert(keycache->can_be_used);
2782
        assert(block->status & (BLOCK_READ | BLOCK_IN_USE));
1 by brian
clean slate
2783
        /* Still must not be marked for free. */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
2784
        assert(!(block->status & BLOCK_REASSIGNED));
2785
        assert(block->hash_link && (block->hash_link->block == block));
1 by brian
clean slate
2786
      }
2787
2788
      /*
2789
        We could perhaps release the cache_lock during access of the
2790
        data like in the other functions. Locks outside of the key cache
2791
        assure that readers and a writer do not access the same range of
2792
        data. Parallel accesses should happen only if the cache block
2793
        contains multiple index block(fragment)s. So different parts of
2794
        the buffer would be read/written. An attempt to flush during
2795
        memcpy() is prevented with BLOCK_FOR_UPDATE.
2796
      */
2797
      if (!(block->status & BLOCK_ERROR))
2798
      {
2799
#if !defined(SERIALIZED_READ_FROM_CACHE)
2800
        keycache_pthread_mutex_unlock(&keycache->cache_lock);
2801
#endif
266.1.22 by Monty Taylor
Removed bmove512.
2802
        memcpy(block->buffer+offset, buff, (size_t) read_length);
1 by brian
clean slate
2803
2804
#if !defined(SERIALIZED_READ_FROM_CACHE)
2805
        keycache_pthread_mutex_lock(&keycache->cache_lock);
2806
#endif
2807
      }
2808
2809
      if (!dont_write)
2810
      {
2811
	/* Not used in the server. buff has been written to disk at start. */
2812
        if ((block->status & BLOCK_CHANGED) &&
2813
            (!offset && read_length >= keycache->key_cache_block_size))
2814
             link_to_file_list(keycache, block, block->hash_link->file, 1);
2815
      }
2816
      else if (! (block->status & BLOCK_CHANGED))
2817
        link_to_changed_list(keycache, block);
2818
      block->status|=BLOCK_READ;
2819
      /*
2820
        Allow block to be selected for to be freed. Since it is marked
2821
        BLOCK_CHANGED too, it won't be selected for to be freed without
2822
        a flush.
2823
      */
2824
      block->status&= ~BLOCK_FOR_UPDATE;
2825
      set_if_smaller(block->offset, offset);
2826
      set_if_bigger(block->length, read_length+offset);
2827
2828
      /* Threads may be waiting for the changes to be complete. */
2829
      release_whole_queue(&block->wqueue[COND_FOR_REQUESTED]);
2830
2831
      /*
2832
        If only a part of the cache block is to be replaced, and the
2833
        rest has been read from file, then the cache lock has been
2834
        released for I/O and it could be possible that another thread
2835
        wants to evict or free the block and waits for it to be
2836
        released. So we must not just decrement hash_link->requests, but
2837
        also wake a waiting thread.
2838
      */
2839
      remove_reader(block);
2840
2841
      /*
2842
         Link the block into the LRU ring if it's the last submitted
2843
         request for the block. This enables eviction for the block.
2844
      */
2845
      unreg_request(keycache, block, 1);
2846
2847
      if (block->status & BLOCK_ERROR)
2848
      {
2849
        error= 1;
2850
        break;
2851
      }
2852
2853
    next_block:
2854
      buff+= read_length;
2855
      filepos+= read_length+offset;
2856
      offset= 0;
2857
2858
    } while ((length-= read_length));
2859
    goto end;
2860
  }
2861
2862
no_key_cache:
2863
  /* Key cache is not used */
2864
  if (dont_write)
2865
  {
2866
    /* Used in the server. */
2867
    keycache->global_cache_w_requests++;
2868
    keycache->global_cache_write++;
2869
    if (locked_and_incremented)
2870
      keycache_pthread_mutex_unlock(&keycache->cache_lock);
481 by Brian Aker
Remove all of uchar.
2871
    if (pwrite(file, (unsigned char*) buff, length, filepos) == 0)
1 by brian
clean slate
2872
      error=1;
2873
    if (locked_and_incremented)
2874
      keycache_pthread_mutex_lock(&keycache->cache_lock);
2875
  }
2876
2877
end:
2878
  if (locked_and_incremented)
2879
  {
2880
    dec_counter_for_resize_op(keycache);
2881
    keycache_pthread_mutex_unlock(&keycache->cache_lock);
2882
  }
51.1.122 by Jay Pipes
Round 2 - removal custom KEYCACHE_DEBUG and THREAD_TRACE from keycache
2883
  return(error);
1 by brian
clean slate
2884
}
2885
2886
2887
/*
2888
  Free block.
2889
2890
  SYNOPSIS
2891
    free_block()
2892
      keycache          Pointer to a key cache data structure
2893
      block             Pointer to the block to free
2894
2895
  DESCRIPTION
2896
    Remove reference to block from hash table.
2897
    Remove block from the chain of clean blocks.
2898
    Add block to the free list.
2899
2900
  NOTE
2901
    Block must not be free (status == 0).
2902
    Block must not be in free_block_list.
2903
    Block must not be in the LRU ring.
2904
    Block must not be in eviction (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH).
2905
    Block must not be in free (BLOCK_REASSIGNED).
2906
    Block must not be in flush (BLOCK_IN_FLUSH).
2907
    Block must not be dirty (BLOCK_CHANGED).
2908
    Block must not be in changed_blocks (dirty) hash.
2909
    Block must be in file_blocks (clean) hash.
2910
    Block must refer to a hash_link.
2911
    Block must have a request registered on it.
2912
*/
2913
2914
static void free_block(KEY_CACHE *keycache, BLOCK_LINK *block)
{
  /*
    Assert that the block is not free already. And that it is in a clean
    state. Note that the block might just be assigned to a hash_link and
    not yet read (BLOCK_READ may not be set here). In this case a reader
    is registered in the hash_link and free_block() will wait for it
    below.
  */
  assert((block->status & BLOCK_IN_USE) &&
              !(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
                                 BLOCK_REASSIGNED | BLOCK_IN_FLUSH |
                                 BLOCK_CHANGED | BLOCK_FOR_UPDATE)));
  /* Assert that the block is in a file_blocks chain. */
  assert(block->prev_changed && *block->prev_changed == block);
  /* Assert that the block is not in the LRU ring. */
  assert(!block->next_used && !block->prev_used);
  /*
    IMHO the below condition (if()) makes no sense. I can't see how it
    could be possible that free_block() is entered with a NULL hash_link
    pointer. The only place where it can become NULL is in free_block()
    (or before its first use ever, but for those blocks free_block() is
    not called). I don't remove the conditional as it cannot harm, but
    place an assert to confirm my hypothesis. Eventually the
    condition (if()) can be removed.
  */
  assert(block->hash_link && block->hash_link->block == block);
  if (block->hash_link)
  {
    /*
      While waiting for readers to finish, new readers might request the
      block. But since we set block->status|= BLOCK_REASSIGNED, they
      will wait on block->wqueue[COND_FOR_SAVED]. They must be signalled
      later.
    */
    block->status|= BLOCK_REASSIGNED;
    /* May release and re-acquire cache_lock while readers drain. */
    wait_for_readers(keycache, block);
    /*
      The block must not have been freed by another thread. Repeat some
      checks. An additional requirement is that it must be read now
      (BLOCK_READ).
    */
    assert(block->hash_link && block->hash_link->block == block);
    assert((block->status & (BLOCK_READ | BLOCK_IN_USE |
                                  BLOCK_REASSIGNED)) &&
                !(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
                                   BLOCK_IN_FLUSH | BLOCK_CHANGED |
                                   BLOCK_FOR_UPDATE)));
    assert(block->prev_changed && *block->prev_changed == block);
    assert(!block->prev_used);
    /*
      Unset BLOCK_REASSIGNED again. If we hand the block to an evicting
      thread (through unreg_request() below), other threads must not see
      this flag. They could become confused.
    */
    block->status&= ~BLOCK_REASSIGNED;
    /*
      Do not release the hash_link until the block is off all lists.
      At least not if we hand it over for eviction in unreg_request().
    */
  }

  /*
    Unregister the block request and link the block into the LRU ring.
    This enables eviction for the block. If the LRU ring was empty and
    threads are waiting for a block, then the block will be handed over
    for eviction immediately. Otherwise we will unlink it from the LRU
    ring again, without releasing the lock in between. So decrementing
    the request counter and updating statistics are the only relevant
    operation in this case. Assert that there are no other requests
    registered.
  */
  assert(block->requests == 1);
  unreg_request(keycache, block, 0);
  /*
    Note that even without releasing the cache lock it is possible that
    the block is immediately selected for eviction by link_block() and
    thus not added to the LRU ring. In this case we must not touch the
    block any more.
  */
  if (block->status & BLOCK_IN_EVICTION)
    return;

  /* Here the block must be in the LRU ring. Unlink it again. */
  assert(block->next_used && block->prev_used &&
              *block->prev_used == block);
  unlink_block(keycache, block);
  /* Keep the warm-block count consistent before marking the block cold. */
  if (block->temperature == BLOCK_WARM)
    keycache->warm_blocks--;
  block->temperature= BLOCK_COLD;

  /* Remove from file_blocks hash. */
  unlink_changed(block);

  /* Remove reference to block from hash table. */
  unlink_hash(keycache, block->hash_link);
  block->hash_link= NULL;

  /* Reset block to its pristine "never used" state. */
  block->status= 0;
  block->length= 0;
  block->offset= keycache->key_cache_block_size;

  /* Enforced by unlink_changed(), but just to be sure. */
  assert(!block->next_changed && !block->prev_changed);
  /* Enforced by unlink_block(): not in LRU ring nor in free_block_list. */
  assert(!block->next_used && !block->prev_used);
  /* Insert the free block in the free list (stack push). */
  block->next_used= keycache->free_block_list;
  keycache->free_block_list= block;
  /* Keep track of the number of currently unused blocks. */
  keycache->blocks_unused++;

  /* All pending requests for this page must be resubmitted. */
  release_whole_queue(&block->wqueue[COND_FOR_SAVED]);
}
3029
3030
3031
/*
  qsort() comparator: order cache blocks by the on-disk position of the
  page they hold, so a write burst proceeds in ascending file offset.
*/
static int cmp_sec_link(BLOCK_LINK **a, BLOCK_LINK **b)
{
  my_off_t pos_a= (*a)->hash_link->diskpos;
  my_off_t pos_b= (*b)->hash_link->diskpos;

  if (pos_a < pos_b)
    return -1;
  if (pos_a > pos_b)
    return 1;
  return 0;
}
3036
3037
3038
/*
3039
  Flush a portion of changed blocks to disk,
3040
  free used blocks if requested
3041
*/
3042
3043
static int flush_cached_blocks(KEY_CACHE *keycache,
3044
                               File file, BLOCK_LINK **cache,
3045
                               BLOCK_LINK **end,
3046
                               enum flush_type type)
3047
{
3048
  int error;
3049
  int last_errno= 0;
482 by Brian Aker
Remove uint.
3050
  uint32_t count= (uint) (end-cache);
1 by brian
clean slate
3051
3052
  /* Don't lock the cache during the flush */
3053
  keycache_pthread_mutex_unlock(&keycache->cache_lock);
3054
  /*
3055
     As all blocks referred in 'cache' are marked by BLOCK_IN_FLUSH
3056
     we are guarunteed no thread will change them
3057
  */
481 by Brian Aker
Remove all of uchar.
3058
  my_qsort((unsigned char*) cache, count, sizeof(*cache), (qsort_cmp) cmp_sec_link);
1 by brian
clean slate
3059
3060
  keycache_pthread_mutex_lock(&keycache->cache_lock);
3061
  /*
3062
    Note: Do not break the loop. We have registered a request on every
3063
    block in 'cache'. These must be unregistered by free_block() or
3064
    unreg_request().
3065
  */
3066
  for ( ; cache != end ; cache++)
3067
  {
3068
    BLOCK_LINK *block= *cache;
3069
    /*
3070
      If the block contents is going to be changed, we abandon the flush
3071
      for this block. flush_key_blocks_int() will restart its search and
3072
      handle the block properly.
3073
    */
3074
    if (!(block->status & BLOCK_FOR_UPDATE))
3075
    {
3076
      /* Blocks coming here must have a certain status. */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
3077
      assert(block->hash_link);
3078
      assert(block->hash_link->block == block);
3079
      assert(block->hash_link->file == file);
3080
      assert((block->status & ~BLOCK_IN_EVICTION) ==
1 by brian
clean slate
3081
                  (BLOCK_READ | BLOCK_IN_FLUSH | BLOCK_CHANGED | BLOCK_IN_USE));
3082
      block->status|= BLOCK_IN_FLUSHWRITE;
3083
      keycache_pthread_mutex_unlock(&keycache->cache_lock);
32 by Brian Aker
More cleanup on pread()
3084
      error= (pwrite(file,
3085
                     block->buffer+block->offset,
3086
                     block->length - block->offset,
3087
                     block->hash_link->diskpos+ block->offset) == 0);
1 by brian
clean slate
3088
      keycache_pthread_mutex_lock(&keycache->cache_lock);
3089
      keycache->global_cache_write++;
3090
      if (error)
3091
      {
3092
        block->status|= BLOCK_ERROR;
3093
        if (!last_errno)
3094
          last_errno= errno ? errno : -1;
3095
      }
3096
      block->status&= ~BLOCK_IN_FLUSHWRITE;
3097
      /* Block must not have changed status except BLOCK_FOR_UPDATE. */
51.1.124 by Jay Pipes
Round 3 - removal custom KEYCACHE_DEBUG_LOG and fail_test code
3098
      assert(block->hash_link);
3099
      assert(block->hash_link->block == block);
3100
      assert(block->hash_link->file == file);
3101
      assert((block->status & ~(BLOCK_FOR_UPDATE | BLOCK_IN_EVICTION)) ==
1 by brian
clean slate
3102
                  (BLOCK_READ | BLOCK_IN_FLUSH | BLOCK_CHANGED | BLOCK_IN_USE));
3103
      /*
3104
        Set correct status and link in right queue for free or later use.
3105
        free_block() must not see BLOCK_CHANGED and it may need to wait
3106
        for readers of the block. These should not see the block in the
3107
        wrong hash. If not freeing the block, we need to have it in the
3108
        right queue anyway.
3109
      */
3110
      link_to_file_list(keycache, block, file, 1);
3111
3112
    }
3113
    block->status&= ~BLOCK_IN_FLUSH;
3114
    /*
3115
      Let to proceed for possible waiting requests to write to the block page.
3116
      It might happen only during an operation to resize the key cache.
3117
    */
3118
    release_whole_queue(&block->wqueue[COND_FOR_SAVED]);
3119
    /* type will never be FLUSH_IGNORE_CHANGED here */
3120
    if (!(type == FLUSH_KEEP || type == FLUSH_FORCE_WRITE) &&
3121
        !(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
3122
                           BLOCK_FOR_UPDATE)))
3123
    {
3124
      /*
3125
        Note that a request has been registered against the block in
3126
        flush_key_blocks_int().
3127
      */
3128
      free_block(keycache, block);
3129
    }
3130
    else
3131
    {
3132
      /*
3133
        Link the block into the LRU ring if it's the last submitted
3134
        request for the block. This enables eviction for the block.
3135
        Note that a request has been registered against the block in
3136
        flush_key_blocks_int().
3137
      */
3138
      unreg_request(keycache, block, 1);
3139
    }
3140
3141
  } /* end of for ( ; cache != end ; cache++) */
3142
  return last_errno;
3143
}
3144
3145
3146
/*
3147
  flush all key blocks for a file to disk, but don't do any mutex locks.
3148
3149
  SYNOPSIS
3150
    flush_key_blocks_int()
3151
      keycache            pointer to a key cache data structure
3152
      file                handler for the file to flush to
3153
      flush_type          type of the flush
3154
3155
  NOTES
3156
    This function doesn't do any mutex locks because it needs to be called both
3157
    from flush_key_blocks and flush_all_key_blocks (the later one does the
3158
    mutex lock in the resize_key_cache() function).
3159
3160
    We do only care about changed blocks that exist when the function is
3161
    entered. We do not guarantee that all changed blocks of the file are
3162
    flushed if more blocks change while this function is running.
3163
3164
  RETURN
3165
    0   ok
3166
    1  error
3167
*/
3168
3169
static int flush_key_blocks_int(KEY_CACHE *keycache,
				File file, enum flush_type type)
{
  /* Stack buffer for the write burst; a bigger one is malloc'ed if needed. */
  BLOCK_LINK *cache_buff[FLUSH_CACHE],**cache;
  int last_errno= 0;
  int last_errcnt= 0;

  cache= cache_buff;
  if (keycache->disk_blocks > 0 &&
      (!my_disable_flush_key_blocks || type != FLUSH_KEEP))
  {
    /* Key cache exists and flush is not disabled */
    int error= 0;
    uint32_t count= FLUSH_CACHE;
    BLOCK_LINK **pos,**end;
    BLOCK_LINK *first_in_switch= NULL;
    BLOCK_LINK *last_in_flush;
    BLOCK_LINK *last_for_update;
    BLOCK_LINK *block, *next;

    if (type != FLUSH_IGNORE_CHANGED)
    {
      /*
         Count how many key blocks we have to cache to be able
         to flush all dirty pages with minimum seek moves
      */
      count= 0;
      for (block= keycache->changed_blocks[FILE_HASH(file)] ;
           block ;
           block= block->next_changed)
      {
        if ((block->hash_link->file == file) &&
            !(block->status & BLOCK_IN_FLUSH))
        {
          count++;
          assert(count<= keycache->blocks_used);
        }
      }
      /*
        Allocate a new buffer only if it's bigger than the one we have.
        Assure that we always have some entries for the case that new
        changed blocks appear while we need to wait for something.
      */
      if ((count > FLUSH_CACHE) &&
          !(cache= (BLOCK_LINK**) my_malloc(sizeof(BLOCK_LINK*)*count,
                                            MYF(0))))
        cache= cache_buff;          /* malloc failed: fall back to stack buffer */
      /*
        After a restart there could be more changed blocks than now.
        So we should not let count become smaller than the fixed buffer.
      */
      if (cache == cache_buff)
        count= FLUSH_CACHE;
    }

    /* Retrieve the blocks and write them to a buffer to be flushed */
restart:
    last_in_flush= NULL;
    last_for_update= NULL;
    end= (pos= cache)+count;
    for (block= keycache->changed_blocks[FILE_HASH(file)] ;
         block ;
         block= next)
    {
      /* Remember the successor now; the chain may change under us. */
      next= block->next_changed;
      if (block->hash_link->file == file)
      {
        if (!(block->status & (BLOCK_IN_FLUSH | BLOCK_FOR_UPDATE)))
        {
          /*
            Note: The special handling of BLOCK_IN_SWITCH is obsolete
            since we set BLOCK_IN_FLUSH if the eviction includes a
            flush. It can be removed in a later version.
          */
          if (!(block->status & BLOCK_IN_SWITCH))
          {
            /*
              We care only for the blocks for which flushing was not
              initiated by another thread and which are not in eviction.
              Registering a request on the block unlinks it from the LRU
              ring and protects against eviction.
            */
            reg_requests(keycache, block, 1);
            if (type != FLUSH_IGNORE_CHANGED)
            {
              /* It's not a temporary file */
              if (pos == end)
              {
                /*
                  This should happen relatively seldom. Remove the
                  request because we won't do anything with the block
                  but restart and pick it again in the next iteration.
                */
                unreg_request(keycache, block, 0);
                /*
                  This happens only if there is not enough
                  memory for the big block
                */
                if ((error= flush_cached_blocks(keycache, file, cache,
                                                end,type)))
                {
                  /* Do not loop infinitely trying to flush in vain. */
                  if ((last_errno == error) && (++last_errcnt > 5))
                    goto err;
                  last_errno= error;
                }
                /*
                  Restart the scan as some other thread might have changed
                  the changed blocks chain: the blocks that were in switch
                  state before the flush started have to be excluded
                */
                goto restart;
              }
              /*
                Mark the block with BLOCK_IN_FLUSH in order not to let
                other threads to use it for new pages and interfere with
                our sequence of flushing dirty file pages. We must not
                set this flag before actually putting the block on the
                write burst array called 'cache'.
              */
              block->status|= BLOCK_IN_FLUSH;
              /* Add block to the array for a write burst. */
              *pos++= block;
            }
            else
            {
              /* It's a temporary file */
              assert(!(block->status & BLOCK_REASSIGNED));

              /*
                free_block() must not be called with BLOCK_CHANGED. Note
                that we must not change the BLOCK_CHANGED flag outside of
                link_to_file_list() so that it is always in the correct
                queue and the *blocks_changed counters are correct.
              */
              link_to_file_list(keycache, block, file, 1);
              if (!(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH)))
              {
                /* A request has been registered against the block above. */
                free_block(keycache, block);
              }
              else
              {
                /*
                  Link the block into the LRU ring if it's the last
                  submitted request for the block. This enables eviction
                  for the block. A request has been registered against
                  the block above.
                */
                unreg_request(keycache, block, 1);
              }
            }
          }
          else
          {
            /*
              Link the block into a list of blocks 'in switch'.

              WARNING: Here we introduce a place where a changed block
              is not in the changed_blocks hash! This is acceptable for
              a BLOCK_IN_SWITCH. Never try this for another situation.
              Other parts of the key cache code rely on changed blocks
              being in the changed_blocks hash.
            */
            unlink_changed(block);
            link_changed(block, &first_in_switch);
          }
        }
        else if (type != FLUSH_KEEP)
        {
          /*
            During the normal flush at end of statement (FLUSH_KEEP) we
            do not need to ensure that blocks in flush or update by
            other threads are flushed. They will be flushed by them
            later. In all other cases we must assure that we do not have
            any changed block of this file in the cache when this
            function returns.
          */
          if (block->status & BLOCK_IN_FLUSH)
          {
            /* Remember the last block found to be in flush. */
            last_in_flush= block;
          }
          else
          {
            /* Remember the last block found to be selected for update. */
            last_for_update= block;
          }
        }
      }
    }
    if (pos != cache)
    {
      if ((error= flush_cached_blocks(keycache, file, cache, pos, type)))
      {
        /* Do not loop infinitely trying to flush in vain. */
        if ((last_errno == error) && (++last_errcnt > 5))
          goto err;
        last_errno= error;
      }
      /*
        Do not restart here during the normal flush at end of statement
        (FLUSH_KEEP). We have now flushed at least all blocks that were
        changed when entering this function. In all other cases we must
        assure that we do not have any changed block of this file in the
        cache when this function returns.
      */
      if (type != FLUSH_KEEP)
        goto restart;
    }
    if (last_in_flush)
    {
      /*
        There are no blocks to be flushed by this thread, but blocks in
        flush by other threads. Wait until one of the blocks is flushed.
        Re-check the condition for last_in_flush. We may have unlocked
        the cache_lock in flush_cached_blocks(). The state of the block
        could have changed.
      */
      if (last_in_flush->status & BLOCK_IN_FLUSH)
        wait_on_queue(&last_in_flush->wqueue[COND_FOR_SAVED],
                      &keycache->cache_lock);
      /* Be sure not to lose a block. They may be flushed in random order. */
      goto restart;
    }
    if (last_for_update)
    {
      /*
        There are no blocks to be flushed by this thread, but blocks for
        update by other threads. Wait until one of the blocks is updated.
        Re-check the condition for last_for_update. We may have unlocked
        the cache_lock in flush_cached_blocks(). The state of the block
        could have changed.
      */
      if (last_for_update->status & BLOCK_FOR_UPDATE)
        wait_on_queue(&last_for_update->wqueue[COND_FOR_REQUESTED],
                      &keycache->cache_lock);
      /* The block is now changed. Flush it. */
      goto restart;
    }

    /*
      Wait until the list of blocks in switch is empty. The threads that
      are switching these blocks will relink them to clean file chains
      while we wait and thus empty the 'first_in_switch' chain.
    */
    while (first_in_switch)
    {
      wait_on_queue(&first_in_switch->wqueue[COND_FOR_SAVED],
                    &keycache->cache_lock);
      /*
        Do not restart here. We have flushed all blocks that were
        changed when entering this function and were not marked for
        eviction. Other threads have now flushed all remaining blocks in
        the course of their eviction.
      */
    }

    if (! (type == FLUSH_KEEP || type == FLUSH_FORCE_WRITE))
    {
      /* These shadow the outer variables of the same names intentionally. */
      BLOCK_LINK *last_for_update= NULL;
      BLOCK_LINK *last_in_switch= NULL;
      uint32_t total_found= 0;
      uint32_t found;

      /*
        Finally free all clean blocks for this file.
        During resize this may be run by two threads in parallel.
      */
      do
      {
        found= 0;
        for (block= keycache->file_blocks[FILE_HASH(file)] ;
             block ;
             block= next)
        {
          /* Remember the next block. After freeing we cannot get at it. */
          next= block->next_changed;

          /* Changed blocks cannot appear in the file_blocks hash. */
          assert(!(block->status & BLOCK_CHANGED));
          if (block->hash_link->file == file)
          {
            /* We must skip blocks that will be changed. */
            if (block->status & BLOCK_FOR_UPDATE)
            {
              last_for_update= block;
              continue;
            }

            /*
              We must not free blocks in eviction (BLOCK_IN_EVICTION |
              BLOCK_IN_SWITCH) or blocks intended to be freed
              (BLOCK_REASSIGNED).
            */
            if (!(block->status & (BLOCK_IN_EVICTION | BLOCK_IN_SWITCH |
                                   BLOCK_REASSIGNED)))
            {
              struct st_hash_link *next_hash_link= NULL;
              my_off_t            next_diskpos= 0;
              File                next_file= 0;
              uint32_t                next_status= 0;
              uint32_t                hash_requests= 0;

              total_found++;
              found++;
              assert(found <= keycache->blocks_used);

              /*
                Register a request. This unlinks the block from the LRU
                ring and protects it against eviction. This is required
                by free_block().
              */
              reg_requests(keycache, block, 1);

              /*
                free_block() may need to wait for readers of the block.
                This is the moment where the other thread can move the
                'next' block from the chain. free_block() needs to wait
                if there are requests for the block pending.
              */
              if (next && (hash_requests= block->hash_link->requests))
              {
                /* Copy values from the 'next' block and its hash_link. */
                next_status=    next->status;
                next_hash_link= next->hash_link;
                next_diskpos=   next_hash_link->diskpos;
                next_file=      next_hash_link->file;
                assert(next == next_hash_link->block);
              }

              free_block(keycache, block);
              /*
                If we had to wait and the state of the 'next' block
                changed, break the inner loop. 'next' may no longer be
                part of the current chain.

                We do not want to break the loop after every free_block(),
                not even only after waits. The chain might be quite long
                and contain blocks for many files. Traversing it again and
                again to find more blocks for this file could become quite
                inefficient.
              */
              if (next && hash_requests &&
                  ((next_status    != next->status) ||
                   (next_hash_link != next->hash_link) ||
                   (next_file      != next_hash_link->file) ||
                   (next_diskpos   != next_hash_link->diskpos) ||
                   (next           != next_hash_link->block)))
                break;
            }
            else
            {
              last_in_switch= block;
            }
          }
        } /* end for block in file_blocks */
      } while (found);

      /*
        If any clean block has been found, we may have waited for it to
        become free. In this case it could be possible that another clean
        block became dirty. This is possible if the write request existed
        before the flush started (BLOCK_FOR_UPDATE). Re-check the hashes.
      */
      if (total_found)
        goto restart;

      /*
        To avoid an infinite loop, wait until one of the blocks marked
        for update is updated.
      */
      if (last_for_update)
      {
        /* We did not wait. Block must not have changed status. */
        assert(last_for_update->status & BLOCK_FOR_UPDATE);
        wait_on_queue(&last_for_update->wqueue[COND_FOR_REQUESTED],
                      &keycache->cache_lock);
        goto restart;
      }

      /*
        To avoid an infinite loop wait until one of the blocks marked
        for eviction is switched.
      */
      if (last_in_switch)
      {
        /* We did not wait. Block must not have changed status. */
        assert(last_in_switch->status & (BLOCK_IN_EVICTION |
                                              BLOCK_IN_SWITCH |
                                              BLOCK_REASSIGNED));
        wait_on_queue(&last_in_switch->wqueue[COND_FOR_SAVED],
                      &keycache->cache_lock);
        goto restart;
      }

    } /* if (! (type == FLUSH_KEEP || type == FLUSH_FORCE_WRITE)) */

  } /* if (keycache->disk_blocks > 0 */

err:
  if (cache != cache_buff)
    free((unsigned char*) cache);
  if (last_errno)
    errno=last_errno;                /* Return first error */
  return(last_errno != 0);
}
3576
3577
3578
/*
3579
  Flush all blocks for a file to disk
3580
3581
  SYNOPSIS
3582
3583
    flush_key_blocks()
3584
      keycache            pointer to a key cache data structure
3585
      file                handler for the file to flush to
3586
      flush_type          type of the flush
3587
3588
  RETURN
3589
    0   ok
3590
    1  error
3591
*/
3592
3593
int flush_key_blocks(KEY_CACHE *keycache,
3594
                     File file, enum flush_type type)
3595
{
3596
  int res= 0;
3597
3598
  if (!keycache->key_cache_inited)
51.1.122 by Jay Pipes
Round 2 - removal custom KEYCACHE_DEBUG and THREAD_TRACE from keycache
3599
    return(0);
1 by brian
clean slate
3600
3601
  keycache_pthread_mutex_lock(&keycache->cache_lock);
3602
  /* While waiting for lock, keycache could have been ended. */
3603
  if (keycache->disk_blocks > 0)
3604
  {
3605
    inc_counter_for_resize_op(keycache);
3606
    res= flush_key_blocks_int(keycache, file, type);
3607
    dec_counter_for_resize_op(keycache);
3608
  }
3609
  keycache_pthread_mutex_unlock(&keycache->cache_lock);
51.1.122 by Jay Pipes
Round 2 - removal custom KEYCACHE_DEBUG and THREAD_TRACE from keycache
3610
  return(res);
1 by brian
clean slate
3611
}
3612
3613
3614
/*
3615
  Flush all blocks in the key cache to disk.
3616
3617
  SYNOPSIS
3618
    flush_all_key_blocks()
3619
      keycache                  pointer to key cache root structure
3620
3621
  DESCRIPTION
3622
3623
    Flushing of the whole key cache is done in two phases.
3624
3625
    1. Flush all changed blocks, waiting for them if necessary. Loop
3626
    until there is no changed block left in the cache.
3627
3628
    2. Free all clean blocks. Normally this means free all blocks. The
3629
    changed blocks were flushed in phase 1 and became clean. However we
3630
    may need to wait for blocks that are read by other threads. While we
3631
    wait, a clean block could become changed if that operation started
3632
    before the resize operation started. To be safe we must restart at
3633
    phase 1.
3634
3635
    When we can run through the changed_blocks and file_blocks hashes
3636
    without finding a block any more, then we are done.
3637
3638
    Note that we hold keycache->cache_lock all the time unless we need
3639
    to wait for something.
3640
3641
  RETURN
3642
    0           OK
3643
    != 0        Error
3644
*/
3645
3646
static int flush_all_key_blocks(KEY_CACHE *keycache)
3647
{
3648
  BLOCK_LINK    *block;
482 by Brian Aker
Remove uint.
3649
  uint32_t          total_found;
3650
  uint32_t          found;
3651
  uint32_t          idx;
1 by brian
clean slate
3652
3653
  do
3654
  {
3655
    safe_mutex_assert_owner(&keycache->cache_lock);
3656
    total_found= 0;
3657
3658
    /*
3659
      Phase1: Flush all changed blocks, waiting for them if necessary.
3660
      Loop until there is no changed block left in the cache.
3661
    */
3662
    do
3663
    {
3664
      found= 0;
3665
      /* Step over the whole changed_blocks hash array. */
3666
      for (idx= 0; idx < CHANGED_BLOCKS_HASH; idx++)
3667
      {
3668
        /*
3669
          If an array element is non-empty, use the first block from its
3670
          chain to find a file for flush. All changed blocks for this
3671
          file are flushed. So the same block will not appear at this
3672
          place again with the next iteration. New writes for blocks are
3673
          not accepted during the flush. If multiple files share the
3674
          same hash bucket, one of them will be flushed per iteration
3675
          of the outer loop of phase 1.
3676
        */
3677
        if ((block= keycache->changed_blocks[idx]))
3678
        {
3679
          found++;
3680
          /*
3681
            Flush dirty blocks but do not free them yet. They can be used
3682
            for reading until all other blocks are flushed too.
3683
          */
3684
          if (flush_key_blocks_int(keycache, block->hash_link->file,
3685
                                   FLUSH_FORCE_WRITE))
51.1.122 by Jay Pipes
Round 2 - removal custom KEYCACHE_DEBUG and THREAD_TRACE from keycache
3686
            return(1);
1 by brian
clean slate
3687
        }
3688
      }
3689
3690
    } while (found);
3691
3692
    /*
3693
      Phase 2: Free all clean blocks. Normally this means free all
3694
      blocks. The changed blocks were flushed in phase 1 and became
3695
      clean. However we may need to wait for blocks that are read by
3696
      other threads. While we wait, a clean block could become changed
3697
      if that operation started before the resize operation started. To
3698
      be safe we must restart at phase 1.
3699
    */
3700
    do
3701
    {
3702
      found= 0;
3703
      /* Step over the whole file_blocks hash array. */
3704
      for (idx= 0; idx < CHANGED_BLOCKS_HASH; idx++)
3705
      {
3706
        /*
3707
          If an array element is non-empty, use the first block from its
3708
          chain to find a file for flush. All blocks for this file are
3709
          freed. So the same block will not appear at this place again
3710
          with the next iteration. If multiple files share the
3711
          same hash bucket, one of them will be flushed per iteration
3712
          of the outer loop of phase 2.
3713
        */
3714
        if ((block= keycache->file_blocks[idx]))
3715
        {
3716
          total_found++;
3717
          found++;
3718
          if (flush_key_blocks_int(keycache, block->hash_link->file,
3719
                                   FLUSH_RELEASE))
51.1.122 by Jay Pipes
Round 2 - removal custom KEYCACHE_DEBUG and THREAD_TRACE from keycache
3720
            return(1);
1 by brian
clean slate
3721
        }
3722
      }
3723
3724
    } while (found);
3725
3726
    /*
3727
      If any clean block has been found, we may have waited for it to
3728
      become free. In this case it could be possible that another clean
3729
      block became dirty. This is possible if the write request existed
3730
      before the resize started (BLOCK_FOR_UPDATE). Re-check the hashes.
3731
    */
3732
  } while (total_found);
51.1.122 by Jay Pipes
Round 2 - removal custom KEYCACHE_DEBUG and THREAD_TRACE from keycache
3733
  return(0);
1 by brian
clean slate
3734
}
3735
3736
3737
/*
3738
  Reset the counters of a key cache.
3739
3740
  SYNOPSIS
3741
    reset_key_cache_counters()
3742
    name       the name of a key cache
3743
    key_cache  pointer to the key kache to be reset
3744
3745
  DESCRIPTION
3746
   This procedure is used by process_key_caches() to reset the counters of all
3747
   currently used key caches, both the default one and the named ones.
3748
3749
  RETURN
3750
    0 on success (always because it can't fail)
3751
*/
3752
3753
int reset_key_cache_counters(const char *name __attribute__((unused)),
3754
                             KEY_CACHE *key_cache)
3755
{
3756
  if (!key_cache->key_cache_inited)
3757
  {
51.1.122 by Jay Pipes
Round 2 - removal custom KEYCACHE_DEBUG and THREAD_TRACE from keycache
3758
    return(0);
1 by brian
clean slate
3759
  }
3760
  key_cache->global_blocks_changed= 0;   /* Key_blocks_not_flushed */
3761
  key_cache->global_cache_r_requests= 0; /* Key_read_requests */
3762
  key_cache->global_cache_read= 0;       /* Key_reads */
3763
  key_cache->global_cache_w_requests= 0; /* Key_write_requests */
3764
  key_cache->global_cache_write= 0;      /* Key_writes */
51.1.122 by Jay Pipes
Round 2 - removal custom KEYCACHE_DEBUG and THREAD_TRACE from keycache
3765
  return(0);
1 by brian
clean slate
3766
}
3767
3768
#if defined(KEYCACHE_TIMEOUT)
3769
511.2.7 by Monty Taylor
We pass -Wunused-macros now!
3770
3771
static inline
3772
unsigned int hash_link_number(HASH_LINK *hash_link, KEY_CACHE *keycache)
3773
{
3774
  return ((unsigned int) (((char*)hash_link-(char *) keycache->hash_link_root)/
3775
		  sizeof(HASH_LINK)));
3776
}
3777
3778
static inline
3779
unsigned int block_number(BLOCK_LINK *block, KEY_CACHE *keycache)
3780
{
3781
  return ((unsigned int) (((char*)block-(char *)keycache->block_root)/
3782
		  sizeof(BLOCK_LINK)));
3783
}
3784
3785
1 by brian
clean slate
3786
#define KEYCACHE_DUMP_FILE  "keycache_dump.txt"
3787
#define MAX_QUEUE_LEN  100
3788
3789
3790
/*
  Dump the key cache state (wait queues, blocks, LRU chain) to
  KEYCACHE_DUMP_FILE for debugging. Compiled only under
  KEYCACHE_TIMEOUT.

  Fixes relative to the previous version:
  - removed an fprintf() that dereferenced 'thread' before it was ever
    assigned (undefined behavior);
  - 'block= keycache= used_last' corrected to
    'block= keycache->used_last' (the old form assigned to the keycache
    pointer itself and referenced an undeclared identifier);
  - the per-block wait-queue loop no longer reuses the outer block
    index 'i' as its length counter (that corrupted the block
    iteration);
  - the fopen() result is checked before use;
  - filepos is printed with %u to match the uint32_t argument (was %lu).
*/
static void keycache_dump(KEY_CACHE *keycache)
{
  FILE *keycache_dump_file= fopen(KEYCACHE_DUMP_FILE, "w");
  struct st_my_thread_var *last;
  struct st_my_thread_var *thread;
  BLOCK_LINK *block;
  HASH_LINK *hash_link;
  KEYCACHE_PAGE *page;
  uint32_t i;

  if (! keycache_dump_file)
    return;                                  /* nowhere to dump to */

  i= 0;
  thread= last= waiting_for_hash_link.last_thread;
  fprintf(keycache_dump_file, "queue of threads waiting for hash link\n");
  if (thread)
    do
    {
      thread= thread->next;
      page= (KEYCACHE_PAGE *) thread->opt_info;
      fprintf(keycache_dump_file,
              "thread:%u, (file,filepos)=(%u,%u)\n",
              thread->id, (unsigned int) page->file,
              (uint32_t) page->filepos);
      if (++i == MAX_QUEUE_LEN)
        break;
    }
    while (thread != last);

  i= 0;
  thread= last= waiting_for_block.last_thread;
  fprintf(keycache_dump_file, "queue of threads waiting for block\n");
  if (thread)
    do
    {
      thread= thread->next;
      hash_link= (HASH_LINK *) thread->opt_info;
      fprintf(keycache_dump_file,
              "thread:%u hash_link:%u (file,filepos)=(%u,%u)\n",
              thread->id, hash_link_number(hash_link, keycache),
              (unsigned int) hash_link->file, (uint32_t) hash_link->diskpos);
      if (++i == MAX_QUEUE_LEN)
        break;
    }
    while (thread != last);

  for (i= 0 ; i < keycache->blocks_used ; i++)
  {
    int j;
    block= &keycache->block_root[i];
    hash_link= block->hash_link;
    fprintf(keycache_dump_file,
            "block:%u hash_link:%d status:%x #requests=%u "
            "waiting_for_readers:%d\n",
            i, (int) (hash_link ? hash_link_number(hash_link, keycache) : -1),
            block->status, block->requests, block->condvar ? 1 : 0);
    for (j= 0 ; j < 2; j++)
    {
      /* Use a dedicated counter; reusing 'i' broke the block loop. */
      uint32_t qlen= 0;
      KEYCACHE_WQUEUE *wqueue= &block->wqueue[j];
      thread= last= wqueue->last_thread;
      fprintf(keycache_dump_file, "queue #%d\n", j);
      if (thread)
      {
        do
        {
          thread= thread->next;
          fprintf(keycache_dump_file,
                  "thread:%u\n", thread->id);
          if (++qlen == MAX_QUEUE_LEN)
            break;
        }
        while (thread != last);
      }
    }
  }
  fprintf(keycache_dump_file, "LRU chain:");
  block= keycache->used_last;
  if (block)
  {
    do
    {
      block= block->next_used;
      fprintf(keycache_dump_file,
              "block:%u, ", block_number(block, keycache));
    }
    while (block != keycache->used_last);
  }
  fprintf(keycache_dump_file, "\n");

  fclose(keycache_dump_file);
}
3880
3881
static int keycache_pthread_cond_wait(pthread_cond_t *cond,
3882
                                      pthread_mutex_t *mutex)
3883
{
3884
  int rc;
3885
  struct timeval  now;            /* time when we started waiting        */
3886
  struct timespec timeout;        /* timeout value for the wait function */
3887
  struct timezone tz;
3888
3889
  /* Get current time */
3890
  gettimeofday(&now, &tz);
3891
  /* Prepare timeout value */
3892
  timeout.tv_sec= now.tv_sec + KEYCACHE_TIMEOUT;
3893
 /*
3894
   timeval uses microseconds.
3895
   timespec uses nanoseconds.
3896
   1 nanosecond = 1000 micro seconds
3897
 */
3898
  timeout.tv_nsec= now.tv_usec * 1000;
3899
  rc= pthread_cond_timedwait(cond, mutex, &timeout);
3900
  if (rc == ETIMEDOUT || rc == ETIME)
3901
  {
3902
    keycache_dump();
3903
  }
3904
3905
  assert(rc != ETIMEDOUT);
3906
  return rc;
3907
}
3908
#endif /* defined(KEYCACHE_TIMEOUT) */