#include "os0thread.h"
#include "mem0mem.h"
#include "srv0srv.h"
#include "os0sync.h" /* for INNODB_RW_LOCKS_USE_ATOMICS */

	IMPLEMENTATION OF THE RW_LOCK
	=============================
The status of a rw_lock is held in lock_word. The initial value of lock_word is
X_LOCK_DECR. lock_word is decremented by 1 for each s-lock and by X_LOCK_DECR
for each x-lock. This describes the lock state for each value of lock_word:

lock_word == X_LOCK_DECR:      Unlocked.
0 < lock_word < X_LOCK_DECR:   Read locked, no waiting writers.
                               (X_LOCK_DECR - lock_word) is the
                               number of readers that hold the lock.
lock_word == 0:                Write locked
-X_LOCK_DECR < lock_word < 0:  Read locked, with a waiting writer.
                               (-lock_word) is the number of readers
                               that hold the lock.
lock_word <= -X_LOCK_DECR:     Recursively write locked. lock_word has been
                               decremented by X_LOCK_DECR once for each lock,
                               so the number of locks is:
                               ((-lock_word) / X_LOCK_DECR) + 1

When lock_word <= -X_LOCK_DECR, we also know that lock_word % X_LOCK_DECR == 0:
other values of lock_word are invalid.
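
As a reading aid only, the state mapping above can be written as a small
classifier. This is an illustrative sketch, not part of the implementation;
it assumes only the X_LOCK_DECR constant and the lint typedef that this
module already uses:

	static const char*
	rw_lock_word_state(lint lock_word)	/* sketch; no such helper exists */
	{
		if (lock_word == X_LOCK_DECR) {
			return("unlocked");
		} else if (lock_word > 0) {
			/* X_LOCK_DECR - lock_word readers hold the lock */
			return("read locked, no waiting writers");
		} else if (lock_word == 0) {
			return("write locked");
		} else if (lock_word > -X_LOCK_DECR) {
			/* -lock_word readers hold the lock */
			return("read locked, with a waiting writer");
		} else {
			/* ((-lock_word) / X_LOCK_DECR) + 1 x-locks held */
			return("recursively write locked");
		}
	}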

The lock_word is always read and updated atomically and consistently, so that
it always represents the state of the lock, and the state of the lock changes
with a single atomic operation. This lock_word holds all of the information
that a thread needs in order to determine if it is eligible to gain the lock
or if it must spin or sleep. The one exception to this is that writer_thread
must be verified before recursive write locks: to solve this scenario, we make
writer_thread readable by all threads, but only writeable by the x-lock holder.

The other members of the lock obey the following rules to remain consistent:

recursive:      This and the writer_thread field together control the
                behaviour of recursive x-locking.
                lock->recursive must be FALSE in following states:
                        1) The writer_thread contains garbage i.e.: the
                        lock has just been initialized.
                        2) The lock is not x-held and there is no
                        x-waiter waiting on WAIT_EX event.
                        3) The lock is x-held or there is an x-waiter
                        waiting on WAIT_EX event but the 'pass' value
                        is non-zero.
                lock->recursive is TRUE iff:
                        1) The lock is x-held or there is an x-waiter
                        waiting on WAIT_EX event and the 'pass' value
                        is zero.
                This flag must be set after the writer_thread field
                has been updated with a memory ordering barrier.
                It is unset before the lock_word has been incremented.
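
A minimal sketch of that ordering, written out (illustrative only: the real
logic is rw_lock_set_writer_id_and_recursion_flag(), pass is the caller's
pass value, and the barrier shown is a placeholder whose concrete form
depends on the platform and on INNODB_RW_LOCKS_USE_ATOMICS):

	/* x-lock acquire: publish the owner, then raise the flag */
	lock->writer_thread = os_thread_get_curr_id();
	/* ... store barrier here ... */
	lock->recursive = (pass == 0);

	/* x-lock release: drop the flag before lock_word is incremented,
	so no thread can observe recursive == TRUE with a stale owner */
	lock->recursive = FALSE;
	/* ... only now increment lock_word ... */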

writer_thread:  Is used only in recursive x-locking. Can only be safely
                read iff lock->recursive flag is TRUE.
                This field is uninitialized at lock creation time and
                is updated atomically when x-lock is acquired or when
                move_ownership is called. A thread is only allowed to
                set the value of this field to its own thread id, i.e.: a
                thread cannot set writer_thread to some other thread's
                id.

waiters:        May be set to 1 anytime, but to avoid unnecessary wake-up
                signals, it should only be set to 1 when there are threads
                waiting on event. Must be 1 when a writer starts waiting to
                ensure the current x-locking thread sends a wake-up signal
                during unlock. May only be reset to 0 immediately before a
                wake-up signal is sent to event. On most platforms, a
                memory barrier is required after waiters is set, and before
                verifying lock_word is still held, to ensure some unlocker
                really does see the flag's new value.

event:          Threads wait on event for read or writer lock when another
                thread has an x-lock or an x-lock reservation (wait_ex). A
                thread may only wait on event after performing the following
                actions in order:
                   (1) Record the counter value of event (with os_event_reset).
                   (2) Set waiters to 1.
                   (3) Verify lock_word <= 0.
                (1) must come before (2) to ensure signal is not missed.
                (2) must come before (3) to ensure a signal is sent.
                These restrictions force the above ordering.
                Immediately before sending the wake-up signal, we should:
                   (1) Verify lock_word == X_LOCK_DECR (unlocked)
                   (2) Reset waiters to 0.
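
A hedged sketch of steps (1)-(3) on a reader's slow path, close to what
rw_lock_s_lock_spin() does (arguments abridged; index is the reserved wait
cell slot):

	/* (1) reserve a wait cell; sync_array_reserve_cell() records the
	event counter via os_event_reset() */
	sync_array_reserve_cell(sync_primary_wait_array, lock,
				RW_LOCK_SHARED, file_name, line, &index);

	/* (2) announce the (imminent) waiter */
	rw_lock_set_waiter_flag(lock);

	/* (3) re-check lock_word; if the lock has become available,
	release the cell instead of sleeping */
	if (rw_lock_s_lock_low(lock, pass, file_name, line)) {
		sync_array_free_cell(sync_primary_wait_array, index);
		return;	/* success */
	}

	sync_array_wait_event(sync_primary_wait_array, index);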

wait_ex_event:  A thread may only wait on the wait_ex_event after it has
                performed the following actions in order:
                   (1) Decrement lock_word by X_LOCK_DECR.
                   (2) Record counter value of wait_ex_event (os_event_reset,
                       called from sync_array_reserve_cell).
                   (3) Verify that lock_word < 0.
                (1) must come first to ensure no other threads become reader
                    or next writer, and notifies unlocker that signal must be sent.
                (2) must come before (3) to ensure the signal is not missed.
                These restrictions force the above ordering.
                Immediately before sending the wake-up signal, we should:
                   Verify lock_word == 0 (waiting thread holds x_lock)
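
The matching wake-up decisions on the unlock paths can be sketched as follows
(illustrative; the real checks are split between the s-unlock and x-unlock
code in sync0rw.ic):

	/* after an unlock has incremented lock_word: */
	if (lock->lock_word == 0) {
		/* a wait_ex waiter now holds the x-lock reservation
		outright: wake only that thread */
		os_event_set(lock->wait_ex_event);
		sync_array_object_signalled(sync_primary_wait_array);
	} else if (lock->lock_word == X_LOCK_DECR && lock->waiters) {
		/* fully unlocked with announced waiters: reset the flag
		immediately before signalling, then wake them */
		rw_lock_reset_waiter_flag(lock);
		os_event_set(lock->event);
		sync_array_object_signalled(sync_primary_wait_array);
	}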

/** number of spin waits on rw-latches,
resulted during shared (read) locks */
UNIV_INTERN ib_int64_t rw_s_spin_wait_count = 0;
/** number of spin loop rounds on rw-latches,
resulted during shared (read) locks */
UNIV_INTERN ib_int64_t rw_s_spin_round_count = 0;

/** number of OS waits on rw-latches,
resulted during shared (read) locks */
UNIV_INTERN ib_int64_t rw_s_os_wait_count = 0;

/** number of unlocks (that unlock shared locks),
set only when UNIV_SYNC_PERF_STAT is defined */
UNIV_INTERN ib_int64_t rw_s_exit_count = 0;

/** number of spin waits on rw-latches,
resulted during exclusive (write) locks */
UNIV_INTERN ib_int64_t rw_x_spin_wait_count = 0;
/** number of spin loop rounds on rw-latches,
resulted during exclusive (write) locks */
UNIV_INTERN ib_int64_t rw_x_spin_round_count = 0;

/** number of OS waits on rw-latches,
resulted during exclusive (write) locks */
UNIV_INTERN ib_int64_t rw_x_os_wait_count = 0;

/** number of unlocks (that unlock exclusive locks),
set only when UNIV_SYNC_PERF_STAT is defined */
UNIV_INTERN ib_int64_t rw_x_exit_count = 0;

/* The global list of rw-locks */
UNIV_INTERN rw_lock_list_t rw_lock_list;
UNIV_INTERN mutex_t rw_lock_list_mutex;

#ifdef UNIV_PFS_MUTEX
UNIV_INTERN mysql_pfs_key_t rw_lock_list_mutex_key;
UNIV_INTERN mysql_pfs_key_t rw_lock_mutex_key;
#endif /* UNIV_PFS_MUTEX */

#ifdef UNIV_SYNC_DEBUG
/* The global mutex which protects debug info lists of all rw-locks.
To modify the debug info list of an rw-lock, this mutex has to be
acquired in addition to the mutex protecting the lock. */

UNIV_INTERN mutex_t rw_lock_debug_mutex;

# ifdef UNIV_PFS_MUTEX
UNIV_INTERN mysql_pfs_key_t rw_lock_debug_mutex_key;
# endif /* UNIV_PFS_MUTEX */

/* If deadlock detection does not get immediately the mutex,
it may wait for this event */
UNIV_INTERN os_event_t rw_lock_debug_event;
/* This is set to TRUE, if there may be waiters for the event */
UNIV_INTERN ibool rw_lock_debug_waiters;

/******************************************************************//**
Creates a debug info struct. */
static
rw_lock_debug_t*
rw_lock_debug_create(void);
/*======================*/
/******************************************************************//**
Frees a debug info struct. */
static
void
rw_lock_debug_free(
/*===============*/
        rw_lock_debug_t* info);
#endif /* UNIV_SYNC_DEBUG */

/******************************************************************//**
Creates, or rather, initializes an rw-lock object in a specified memory
location (which must be appropriately aligned). The rw-lock is initialized
to the non-locked state. Explicit freeing of the rw-lock with rw_lock_free
is necessary only if the memory block containing it is freed. */
UNIV_INTERN
void
rw_lock_create_func(
/*================*/
        rw_lock_t*      lock,           /*!< in: pointer to memory */
#ifdef UNIV_DEBUG
# ifdef UNIV_SYNC_DEBUG
        ulint           level,          /*!< in: level */
# endif /* UNIV_SYNC_DEBUG */
        const char*     cmutex_name,    /*!< in: mutex name */
#endif /* UNIV_DEBUG */
        const char*     cfile_name,     /*!< in: file name where created */
        ulint           cline)          /*!< in: file line where created */
{
        /* If this is the very first time a synchronization object is
        created, then the following call initializes the sync system. */

#ifndef INNODB_RW_LOCKS_USE_ATOMICS
        mutex_create(rw_lock_mutex_key, rw_lock_get_mutex(lock),
                     SYNC_NO_ORDER_CHECK);

        lock->mutex.cfile_name = cfile_name;
        lock->mutex.cline = cline;

        ut_d(lock->mutex.cmutex_name = cmutex_name);
        ut_d(lock->mutex.mutex_type = 1);
#else /* INNODB_RW_LOCKS_USE_ATOMICS */
# ifdef UNIV_DEBUG
        UT_NOT_USED(cmutex_name);
# endif
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */

        lock->lock_word = X_LOCK_DECR;
        lock->waiters = 0;

        /* We set this value to signify that lock->writer_thread
        contains garbage at initialization and cannot be used for
        recursive x-locking. */
        lock->recursive = FALSE;

#ifdef UNIV_SYNC_DEBUG
        UT_LIST_INIT(lock->debug_list);

        lock->level = level;
#endif /* UNIV_SYNC_DEBUG */

        ut_d(lock->magic_n = RW_LOCK_MAGIC_N);

        lock->cfile_name = cfile_name;
        lock->cline = (unsigned int) cline;

        lock->count_os_wait = 0;
        lock->last_s_file_name = "not yet reserved";
        lock->last_x_file_name = "not yet reserved";
        lock->last_s_line = 0;
        lock->last_x_line = 0;
        lock->event = os_event_create(NULL);
        lock->wait_ex_event = os_event_create(NULL);

        mutex_enter(&rw_lock_list_mutex);

        ut_ad(UT_LIST_GET_FIRST(rw_lock_list) == NULL
              || UT_LIST_GET_FIRST(rw_lock_list)->magic_n == RW_LOCK_MAGIC_N);

        UT_LIST_ADD_FIRST(list, rw_lock_list, lock);

        mutex_exit(&rw_lock_list_mutex);
}
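
/* Usage note (illustrative sketch, not taken from this file): callers
normally go through the rw_lock_create() macro, which supplies the debug
arguments declared above. In a non-debug build a direct call would look
roughly like this; block_lock is a made-up name:

	rw_lock_t*	block_lock = mem_alloc(sizeof(rw_lock_t));

	rw_lock_create_func(block_lock, __FILE__, __LINE__);

	rw_lock_s_lock(block_lock);
	... read the protected structure ...
	rw_lock_s_unlock(block_lock);

	rw_lock_free(block_lock);
	mem_free(block_lock);
*/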

/******************************************************************//**
Calling this function is obligatory only if the memory buffer containing
the rw-lock is freed. Removes an rw-lock object from the global list. The
rw-lock is checked to be in the non-locked state. */
UNIV_INTERN
void
rw_lock_free(
/*=========*/
        rw_lock_t*      lock)   /*!< in: rw-lock */
{
        ut_ad(rw_lock_validate(lock));
        ut_a(lock->lock_word == X_LOCK_DECR);

#ifndef INNODB_RW_LOCKS_USE_ATOMICS
        mutex_free(rw_lock_get_mutex(lock));
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */

        mutex_enter(&rw_lock_list_mutex);
        os_event_free(lock->event);

        os_event_free(lock->wait_ex_event);

        ut_ad(UT_LIST_GET_PREV(list, lock) == NULL
              || UT_LIST_GET_PREV(list, lock)->magic_n == RW_LOCK_MAGIC_N);
        ut_ad(UT_LIST_GET_NEXT(list, lock) == NULL
              || UT_LIST_GET_NEXT(list, lock)->magic_n == RW_LOCK_MAGIC_N);

        UT_LIST_REMOVE(list, rw_lock_list, lock);

        mutex_exit(&rw_lock_list_mutex);

        ut_d(lock->magic_n = 0);
}

#ifdef UNIV_DEBUG
/******************************************************************//**
Checks that the rw-lock has been initialized and that there are no
simultaneous shared and exclusive locks.
@return TRUE */
UNIV_INTERN
ibool
rw_lock_validate(
/*=============*/
        rw_lock_t*      lock)   /*!< in: rw-lock */
{
        ulint   waiters;
        lint    lock_word;

        waiters = rw_lock_get_waiters(lock);
        lock_word = lock->lock_word;

        ut_ad(lock->magic_n == RW_LOCK_MAGIC_N);
        ut_a(waiters == 0 || waiters == 1);
        ut_a(lock_word > -X_LOCK_DECR || (-lock_word) % X_LOCK_DECR == 0);

        return(TRUE);
}

#endif /* UNIV_DEBUG */

/******************************************************************//**
Lock an rw-lock in shared mode for the current thread. If the rw-lock is
locked in exclusive mode, or there is an exclusive lock request waiting,
the function spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting
for the lock, before suspending the thread. */
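
/* The spin phase described above follows the usual InnoDB pattern. A
condensed, illustrative sketch (the real loop in rw_lock_s_lock_spin() also
maintains the rw_s_* statistics and honours the pass value): */
#if 0	/* sketch only, not compiled */
	i = 0;
	while (i < SYNC_SPIN_ROUNDS && lock->lock_word <= 0) {
		if (srv_spin_wait_delay) {
			ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));
		}
		i++;
	}

	if (i == SYNC_SPIN_ROUNDS) {
		os_thread_yield();	/* yield before falling back to the
					sync wait array */
	}
#endif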

UNIV_INTERN
void
rw_lock_x_lock_move_ownership(
/*==========================*/
        rw_lock_t*      lock)   /*!< in: lock which was x-locked in the
                                buffer pool */
{
        ut_ad(rw_lock_is_locked(lock, RW_LOCK_EX));

        rw_lock_set_writer_id_and_recursion_flag(lock, TRUE);
}

/******************************************************************//**
Function for the next writer to call. Waits for readers to exit.
The caller must have already decremented lock_word by X_LOCK_DECR. */
UNIV_INLINE
void
rw_lock_x_lock_wait(
/*================*/
        rw_lock_t*      lock,   /*!< in: pointer to rw-lock */
#ifdef UNIV_SYNC_DEBUG
        ulint           pass,   /*!< in: pass value; != 0, if the lock will
                                be passed to another thread to unlock */
#endif
        const char*     file_name,/*!< in: file name where lock requested */
        ulint           line)   /*!< in: line where requested */
{
        ulint   index;
        ulint   i = 0;

        ut_ad(lock->lock_word <= 0);

        while (lock->lock_word < 0) {
                if (srv_spin_wait_delay) {
                        ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));
                }
                if (i < SYNC_SPIN_ROUNDS) {
                        i++;
                        continue;
                }

                /* If there is still a reader, then go to sleep.*/
                rw_x_spin_round_count += i;
                i = 0;
                sync_array_reserve_cell(sync_primary_wait_array,
                                        lock, RW_LOCK_WAIT_EX,
                                        file_name, line, &index);
                /* Check lock_word to ensure wake-up isn't missed.*/
                if (lock->lock_word < 0) {

                        /* these stats may not be accurate */
                        lock->count_os_wait++;
                        rw_x_os_wait_count++;

                        /* Add debug info as it is needed to detect possible
                        deadlock. We must add info for WAIT_EX thread for
                        deadlock detection to work properly. */
#ifdef UNIV_SYNC_DEBUG
                        rw_lock_add_debug_info(lock, pass, RW_LOCK_WAIT_EX,
                                               file_name, line);
#endif

                        sync_array_wait_event(sync_primary_wait_array,
                                              index);
#ifdef UNIV_SYNC_DEBUG
                        rw_lock_remove_debug_info(lock, pass,
                                                  RW_LOCK_WAIT_EX);
#endif
                        /* It is possible to wake when lock_word < 0.
                        We must pass the while-loop check to proceed.*/
                } else {
                        sync_array_free_cell(sync_primary_wait_array,
                                             index);
                }
        }
        rw_x_spin_round_count += i;
}

/******************************************************************//**
Low-level function for acquiring an exclusive lock.
@return RW_LOCK_NOT_LOCKED if did not succeed, RW_LOCK_EX if success. */
UNIV_INLINE
ibool
rw_lock_x_lock_low(
/*===============*/
        rw_lock_t*      lock,   /*!< in: pointer to rw-lock */
        ulint           pass,   /*!< in: pass value; != 0, if the lock will
                                be passed to another thread to unlock */
        const char*     file_name,/*!< in: file name where lock requested */
        ulint           line)   /*!< in: line where requested */
{
        os_thread_id_t  curr_thread = os_thread_get_curr_id();

        if (rw_lock_lock_word_decr(lock, X_LOCK_DECR)) {

                /* lock->recursive also tells us if the writer_thread
                field is stale or active. As we are going to write
                our own thread id in that field it must be that the
                current writer_thread value is not active. */
                ut_a(!lock->recursive);

                /* Decrement occurred: we are writer or next-writer. */
                rw_lock_set_writer_id_and_recursion_flag(lock,
                                                         pass ? FALSE : TRUE);

                rw_lock_x_lock_wait(lock,
#ifdef UNIV_SYNC_DEBUG
                                    pass,
#endif
                                    file_name, line);

        } else {
                /* Decrement failed: relock or failed lock */
                if (!pass && lock->recursive
                    && os_thread_eq(lock->writer_thread, curr_thread)) {
                        /* Relock */
                        lock->lock_word -= X_LOCK_DECR;
                } else {
                        /* Another thread locked before us */
                        return(FALSE);
                }
        }
#ifdef UNIV_SYNC_DEBUG
        rw_lock_add_debug_info(lock, pass, RW_LOCK_EX,
                               file_name, line);
#endif
        lock->last_x_file_name = file_name;
        lock->last_x_line = (unsigned int) line;

        return(TRUE);
}

/******************************************************************//**
NOTE! Use the corresponding macro, not directly this function! Lock an
rw-lock in exclusive mode for the current thread. If the rw-lock is locked
in shared or exclusive mode, or there is an exclusive lock request waiting,
the function spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting
for the lock before suspending the thread. If the same thread has an x-lock
on the rw-lock, locking succeeds, with the following exception: if pass != 0,
only a single x-lock may be taken on the lock. NOTE: If the same thread has
an s-lock, locking does not succeed! */
UNIV_INTERN
void
rw_lock_x_lock_func(
/*================*/
        rw_lock_t*      lock,   /*!< in: pointer to rw-lock */
        ulint           pass,   /*!< in: pass value; != 0, if the lock will
                                be passed to another thread to unlock */
        const char*     file_name,/*!< in: file name where lock requested */
        ulint           line)   /*!< in: line where requested */
{
        ulint   index;  /*!< index of the reserved wait cell */
        ulint   i;      /*!< spin round count */
        ibool   spinning = FALSE;

        ut_ad(rw_lock_validate(lock));

        i = 0;

lock_loop:

        if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
                rw_x_spin_round_count += i;

                return; /* Locking succeeded */

        } else {

                if (!spinning) {
                        spinning = TRUE;
                        rw_x_spin_wait_count++;
                }

                /* Spin waiting for the lock_word to become free */
                while (i < SYNC_SPIN_ROUNDS
                       && lock->lock_word <= 0) {
                        if (srv_spin_wait_delay) {
                                ut_delay(ut_rnd_interval(0,
                                                         srv_spin_wait_delay));
                        }

                        i++;
                }
                if (i == SYNC_SPIN_ROUNDS) {
                        os_thread_yield();
                } else {
                        goto lock_loop;
                }
        }

        rw_x_spin_round_count += i;

        if (srv_print_latch_waits) {
                fprintf(stderr,
                        "Thread %lu spin wait rw-x-lock at %p"