/*****************************************************************************

Copyright (C) 1995, 2009, Innobase Oy. All Rights Reserved.
Copyright (C) 2008, Google Inc.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
St, Fifth Floor, Boston, MA 02110-1301 USA

*****************************************************************************/

/**************************************************//**
The read-write lock (for thread synchronization)

Created 9/11/1995 Heikki Tuuri
*******************************************************/
#include "os0thread.h"
41
#include "os0sync.h" /* for INNODB_RW_LOCKS_USE_ATOMICS */
/*
	IMPLEMENTATION OF THE RW_LOCK
	=============================
The status of a rw_lock is held in lock_word. The initial value of lock_word is
X_LOCK_DECR. lock_word is decremented by 1 for each s-lock and by X_LOCK_DECR
for each x-lock. This describes the lock state for each value of lock_word:

lock_word == X_LOCK_DECR:      Unlocked.
0 < lock_word < X_LOCK_DECR:   Read locked, no waiting writers.
			       (X_LOCK_DECR - lock_word) is the
			       number of readers that hold the lock.
lock_word == 0:		       Write locked
-X_LOCK_DECR < lock_word < 0:  Read locked, with a waiting writer.
			       (-lock_word) is the number of readers
			       that hold the lock.
lock_word <= -X_LOCK_DECR:     Recursively write locked. lock_word has been
			       decremented by X_LOCK_DECR once for each lock,
			       so the number of locks is:
			       ((-lock_word) / X_LOCK_DECR) + 1
When lock_word <= -X_LOCK_DECR, we also know that lock_word % X_LOCK_DECR == 0:
other values of lock_word are invalid.

The lock_word is always read and updated atomically and consistently, so that
it always represents the state of the lock, and the state of the lock changes
with a single atomic operation. This lock_word holds all of the information
that a thread needs in order to determine if it is eligible to gain the lock
or if it must spin or sleep. The one exception to this is that writer_thread
must be verified before recursive write locks: to solve this scenario, we make
writer_thread readable by all threads, but only writeable by the x-lock holder.

The other members of the lock obey the following rules to remain consistent:

recursive:	This and the writer_thread field together control the
		behaviour of recursive x-locking.
		lock->recursive must be FALSE in following states:
			1) The writer_thread contains garbage i.e.: the
			lock has just been initialized.
			2) The lock is not x-held and there is no
			x-waiter waiting on WAIT_EX event.
			3) The lock is x-held or there is an x-waiter
			waiting on WAIT_EX event but the 'pass' value
			is non-zero.
		lock->recursive is TRUE iff:
			1) The lock is x-held or there is an x-waiter
			waiting on WAIT_EX event and the 'pass' value
			is zero.
		This flag must be set after the writer_thread field
		has been updated with a memory ordering barrier.
		It is unset before the lock_word has been incremented.
writer_thread:	Is used only in recursive x-locking. Can only be safely
		read iff lock->recursive flag is TRUE.
		This field is uninitialized at lock creation time and
		is updated atomically when x-lock is acquired or when
		move_ownership is called. A thread is only allowed to
		set the value of this field to it's thread_id i.e.: a
		thread cannot set writer_thread to some other thread's
		id.
waiters:	May be set to 1 anytime, but to avoid unnecessary wake-up
		signals, it should only be set to 1 when there are threads
		waiting on event. Must be 1 when a writer starts waiting to
		ensure the current x-locking thread sends a wake-up signal
		during unlock. May only be reset to 0 immediately before a
		a wake-up signal is sent to event. On most platforms, a
		memory barrier is required after waiters is set, and before
		verifying lock_word is still held, to ensure some unlocker
		really does see the flags new value.
event:		Threads wait on event for read or writer lock when another
		thread has an x-lock or an x-lock reservation (wait_ex). A
		thread may only wait on event after performing the following
		actions in order:
		   (1) Record the counter value of event (with os_event_reset).
		   (2) Set waiters to 1.
		   (3) Verify lock_word <= 0.
		(1) must come before (2) to ensure signal is not missed.
		(2) must come before (3) to ensure a signal is sent.
		These restrictions force the above ordering.
		Immediately before sending the wake-up signal, we should:
		   (1) Verify lock_word == X_LOCK_DECR (unlocked)
		   (2) Reset waiters to 0.
wait_ex_event:	A thread may only wait on the wait_ex_event after it has
		performed the following actions in order:
		   (1) Decrement lock_word by X_LOCK_DECR.
		   (2) Record counter value of wait_ex_event (os_event_reset,
		       called from sync_array_reserve_cell).
		   (3) Verify that lock_word < 0.
		(1) must come first to ensures no other threads become reader
		or next writer, and notifies unlocker that signal must be sent.
		(2) must come before (3) to ensure the signal is not missed.
		These restrictions force the above ordering.
		Immediately before sending the wake-up signal, we should:
		   Verify lock_word == 0 (waiting thread holds x_lock)
*/
/** number of spin waits on rw-latches,
138
resulted during shared (read) locks */
139
UNIV_INTERN ib_int64_t rw_s_spin_wait_count = 0;
140
/** number of spin loop rounds on rw-latches,
141
resulted during shared (read) locks */
142
UNIV_INTERN ib_int64_t rw_s_spin_round_count = 0;
144
/** number of OS waits on rw-latches,
145
resulted during shared (read) locks */
146
UNIV_INTERN ib_int64_t rw_s_os_wait_count = 0;
148
/** number of unlocks (that unlock shared locks),
149
set only when UNIV_SYNC_PERF_STAT is defined */
150
UNIV_INTERN ib_int64_t rw_s_exit_count = 0;
152
/** number of spin waits on rw-latches,
153
resulted during exclusive (write) locks */
154
UNIV_INTERN ib_int64_t rw_x_spin_wait_count = 0;
155
/** number of spin loop rounds on rw-latches,
156
resulted during exclusive (write) locks */
157
UNIV_INTERN ib_int64_t rw_x_spin_round_count = 0;
159
/** number of OS waits on rw-latches,
160
resulted during exclusive (write) locks */
161
UNIV_INTERN ib_int64_t rw_x_os_wait_count = 0;
163
/** number of unlocks (that unlock exclusive locks),
164
set only when UNIV_SYNC_PERF_STAT is defined */
165
UNIV_INTERN ib_int64_t rw_x_exit_count = 0;
167
/* The global list of rw-locks */
168
UNIV_INTERN rw_lock_list_t rw_lock_list;
169
UNIV_INTERN mutex_t rw_lock_list_mutex;
171
#ifdef UNIV_PFS_MUTEX
172
UNIV_INTERN mysql_pfs_key_t rw_lock_list_mutex_key;
173
UNIV_INTERN mysql_pfs_key_t rw_lock_mutex_key;
174
#endif /* UNIV_PFS_MUTEX */
176
#ifdef UNIV_SYNC_DEBUG
/* The global mutex which protects debug info lists of all rw-locks.
To modify the debug info list of an rw-lock, this mutex has to be
acquired in addition to the mutex protecting the lock. */

UNIV_INTERN mutex_t		rw_lock_debug_mutex;

# ifdef UNIV_PFS_MUTEX
UNIV_INTERN mysql_pfs_key_t	rw_lock_debug_mutex_key;
# endif /* UNIV_PFS_MUTEX */

/* If deadlock detection does not get immediately the mutex,
it may wait for this event */
UNIV_INTERN os_event_t		rw_lock_debug_event;
/* This is set to TRUE, if there may be waiters for the event */
UNIV_INTERN ibool		rw_lock_debug_waiters;

/******************************************************************//**
Creates a debug info struct. */
static
rw_lock_debug_t*
rw_lock_debug_create(void);
/*======================*/
/******************************************************************//**
Frees a debug info struct. */
static
void
rw_lock_debug_free(
/*===============*/
	rw_lock_debug_t* info);

/******************************************************************//**
Creates a debug info struct.
@return	own: debug info struct */
static
rw_lock_debug_t*
rw_lock_debug_create(void)
/*======================*/
{
	return((rw_lock_debug_t*) mem_alloc(sizeof(rw_lock_debug_t)));
}

/******************************************************************//**
Frees a debug info struct. */
static
void
rw_lock_debug_free(
/*===============*/
	rw_lock_debug_t* info)
{
	mem_free(info);
}

#endif /* UNIV_SYNC_DEBUG */
/******************************************************************//**
231
Creates, or rather, initializes an rw-lock object in a specified memory
232
location (which must be appropriately aligned). The rw-lock is initialized
233
to the non-locked state. Explicit freeing of the rw-lock with rw_lock_free
234
is necessary only if the memory block containing it is freed. */
239
rw_lock_t* lock, /*!< in: pointer to memory */
241
# ifdef UNIV_SYNC_DEBUG
242
ulint level, /*!< in: level */
243
# endif /* UNIV_SYNC_DEBUG */
244
const char* cmutex_name, /*!< in: mutex name */
245
#endif /* UNIV_DEBUG */
246
const char* cfile_name, /*!< in: file name where created */
247
ulint cline) /*!< in: file line where created */
249
/* If this is the very first time a synchronization object is
250
created, then the following call initializes the sync system. */
252
#ifndef INNODB_RW_LOCKS_USE_ATOMICS
253
mutex_create(rw_lock_mutex_key, rw_lock_get_mutex(lock),
254
SYNC_NO_ORDER_CHECK);
256
lock->mutex.cfile_name = cfile_name;
257
lock->mutex.cline = cline;
259
ut_d(lock->mutex.cmutex_name = cmutex_name);
260
ut_d(lock->mutex.mutex_type = 1);
261
#else /* INNODB_RW_LOCKS_USE_ATOMICS */
263
UT_NOT_USED(cmutex_name);
265
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
267
lock->lock_word = X_LOCK_DECR;
270
/* We set this value to signify that lock->writer_thread
271
contains garbage at initialization and cannot be used for
272
recursive x-locking. */
273
lock->recursive = FALSE;
275
#ifdef UNIV_SYNC_DEBUG
276
UT_LIST_INIT(lock->debug_list);
279
#endif /* UNIV_SYNC_DEBUG */
281
ut_d(lock->magic_n = RW_LOCK_MAGIC_N);
283
lock->cfile_name = cfile_name;
284
lock->cline = (unsigned int) cline;
286
lock->count_os_wait = 0;
287
lock->last_s_file_name = "not yet reserved";
288
lock->last_x_file_name = "not yet reserved";
289
lock->last_s_line = 0;
290
lock->last_x_line = 0;
291
lock->event = os_event_create(NULL);
292
lock->wait_ex_event = os_event_create(NULL);
294
mutex_enter(&rw_lock_list_mutex);
296
ut_ad(UT_LIST_GET_FIRST(rw_lock_list) == NULL
297
|| UT_LIST_GET_FIRST(rw_lock_list)->magic_n == RW_LOCK_MAGIC_N);
299
UT_LIST_ADD_FIRST(list, rw_lock_list, lock);
301
mutex_exit(&rw_lock_list_mutex);
304
/******************************************************************//**
305
Calling this function is obligatory only if the memory buffer containing
306
the rw-lock is freed. Removes an rw-lock object from the global list. The
307
rw-lock is checked to be in the non-locked state. */
312
rw_lock_t* lock) /*!< in: rw-lock */
314
ut_ad(rw_lock_validate(lock));
315
ut_a(lock->lock_word == X_LOCK_DECR);
317
#ifndef INNODB_RW_LOCKS_USE_ATOMICS
318
mutex_free(rw_lock_get_mutex(lock));
319
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
321
mutex_enter(&rw_lock_list_mutex);
322
os_event_free(lock->event);
324
os_event_free(lock->wait_ex_event);
326
ut_ad(UT_LIST_GET_PREV(list, lock) == NULL
327
|| UT_LIST_GET_PREV(list, lock)->magic_n == RW_LOCK_MAGIC_N);
328
ut_ad(UT_LIST_GET_NEXT(list, lock) == NULL
329
|| UT_LIST_GET_NEXT(list, lock)->magic_n == RW_LOCK_MAGIC_N);
331
UT_LIST_REMOVE(list, rw_lock_list, lock);
333
mutex_exit(&rw_lock_list_mutex);
335
ut_d(lock->magic_n = 0);
339
#ifdef UNIV_DEBUG
/******************************************************************//**
Checks that the rw-lock has been initialized and that there are no
simultaneous shared and exclusive locks.
@return	TRUE */
UNIV_INTERN
ibool
rw_lock_validate(
/*=============*/
	rw_lock_t*	lock)	/*!< in: rw-lock */
{
	ulint	waiters;
	lint	lock_word;

	ut_a(lock);

	waiters = rw_lock_get_waiters(lock);
	lock_word = lock->lock_word;

	ut_ad(lock->magic_n == RW_LOCK_MAGIC_N);
	ut_a(waiters == 0 || waiters == 1);
	/* When recursively x-locked (lock_word <= -X_LOCK_DECR),
	lock_word must be an exact multiple of X_LOCK_DECR. */
	ut_a(lock_word > -X_LOCK_DECR ||(-lock_word) % X_LOCK_DECR == 0);

	return(TRUE);
}
#endif /* UNIV_DEBUG */
/******************************************************************//**
366
Lock an rw-lock in shared mode for the current thread. If the rw-lock is
367
locked in exclusive mode, or there is an exclusive lock request waiting,
368
the function spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting
369
for the lock, before suspending the thread. */
374
rw_lock_t* lock, /*!< in: pointer to rw-lock */
375
ulint pass, /*!< in: pass value; != 0, if the lock
376
will be passed to another thread to unlock */
377
const char* file_name, /*!< in: file name where lock requested */
378
ulint line) /*!< in: line where requested */
380
ulint index; /* index of the reserved wait cell */
381
ulint i = 0; /* spin round count */
383
ut_ad(rw_lock_validate(lock));
385
rw_s_spin_wait_count++; /*!< Count calls to this function */
388
/* Spin waiting for the writer field to become free */
389
while (i < SYNC_SPIN_ROUNDS && lock->lock_word <= 0) {
390
if (srv_spin_wait_delay) {
391
ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));
397
if (i == SYNC_SPIN_ROUNDS) {
401
if (srv_print_latch_waits) {
403
"Thread %lu spin wait rw-s-lock at %p"
404
" cfile %s cline %lu rnds %lu\n",
405
(ulong) os_thread_pf(os_thread_get_curr_id()),
407
lock->cfile_name, (ulong) lock->cline, (ulong) i);
410
/* We try once again to obtain the lock */
411
if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
412
rw_s_spin_round_count += i;
414
return; /* Success */
417
if (i < SYNC_SPIN_ROUNDS) {
421
rw_s_spin_round_count += i;
423
sync_array_reserve_cell(sync_primary_wait_array,
424
lock, RW_LOCK_SHARED,
428
/* Set waiters before checking lock_word to ensure wake-up
429
signal is sent. This may lead to some unnecessary signals. */
430
rw_lock_set_waiter_flag(lock);
432
if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
433
sync_array_free_cell(sync_primary_wait_array, index);
434
return; /* Success */
437
if (srv_print_latch_waits) {
439
"Thread %lu OS wait rw-s-lock at %p"
440
" cfile %s cline %lu\n",
441
os_thread_pf(os_thread_get_curr_id()),
442
(void*) lock, lock->cfile_name,
443
(ulong) lock->cline);
446
/* these stats may not be accurate */
447
lock->count_os_wait++;
448
rw_s_os_wait_count++;
450
sync_array_wait_event(sync_primary_wait_array, index);
457
/******************************************************************//**
458
This function is used in the insert buffer to move the ownership of an
459
x-latch on a buffer frame to the current thread. The x-latch was set by
460
the buffer read operation and it protected the buffer frame while the
461
read was done. The ownership is moved because we want that the current
462
thread is able to acquire a second x-latch which is stored in an mtr.
463
This, in turn, is needed to pass the debug checks of index page
467
rw_lock_x_lock_move_ownership(
468
/*==========================*/
469
rw_lock_t* lock) /*!< in: lock which was x-locked in the
472
ut_ad(rw_lock_is_locked(lock, RW_LOCK_EX));
474
rw_lock_set_writer_id_and_recursion_flag(lock, TRUE);
477
/******************************************************************//**
478
Function for the next writer to call. Waits for readers to exit.
479
The caller must have already decremented lock_word by X_LOCK_DECR. */
484
rw_lock_t* lock, /*!< in: pointer to rw-lock */
485
#ifdef UNIV_SYNC_DEBUG
486
ulint pass, /*!< in: pass value; != 0, if the lock will
487
be passed to another thread to unlock */
489
const char* file_name,/*!< in: file name where lock requested */
490
ulint line) /*!< in: line where requested */
495
ut_ad(lock->lock_word <= 0);
497
while (lock->lock_word < 0) {
498
if (srv_spin_wait_delay) {
499
ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));
501
if(i < SYNC_SPIN_ROUNDS) {
506
/* If there is still a reader, then go to sleep.*/
507
rw_x_spin_round_count += i;
509
sync_array_reserve_cell(sync_primary_wait_array,
514
/* Check lock_word to ensure wake-up isn't missed.*/
515
if(lock->lock_word < 0) {
517
/* these stats may not be accurate */
518
lock->count_os_wait++;
519
rw_x_os_wait_count++;
521
/* Add debug info as it is needed to detect possible
522
deadlock. We must add info for WAIT_EX thread for
523
deadlock detection to work properly. */
524
#ifdef UNIV_SYNC_DEBUG
525
rw_lock_add_debug_info(lock, pass, RW_LOCK_WAIT_EX,
529
sync_array_wait_event(sync_primary_wait_array,
531
#ifdef UNIV_SYNC_DEBUG
532
rw_lock_remove_debug_info(lock, pass,
535
/* It is possible to wake when lock_word < 0.
536
We must pass the while-loop check to proceed.*/
538
sync_array_free_cell(sync_primary_wait_array,
542
rw_x_spin_round_count += i;
545
/******************************************************************//**
546
Low-level function for acquiring an exclusive lock.
547
@return RW_LOCK_NOT_LOCKED if did not succeed, RW_LOCK_EX if success. */
552
rw_lock_t* lock, /*!< in: pointer to rw-lock */
553
ulint pass, /*!< in: pass value; != 0, if the lock will
554
be passed to another thread to unlock */
555
const char* file_name,/*!< in: file name where lock requested */
556
ulint line) /*!< in: line where requested */
558
os_thread_id_t curr_thread = os_thread_get_curr_id();
560
if (rw_lock_lock_word_decr(lock, X_LOCK_DECR)) {
562
/* lock->recursive also tells us if the writer_thread
563
field is stale or active. As we are going to write
564
our own thread id in that field it must be that the
565
current writer_thread value is not active. */
566
ut_a(!lock->recursive);
568
/* Decrement occurred: we are writer or next-writer. */
569
rw_lock_set_writer_id_and_recursion_flag(lock,
570
pass ? FALSE : TRUE);
572
rw_lock_x_lock_wait(lock,
573
#ifdef UNIV_SYNC_DEBUG
579
/* Decrement failed: relock or failed lock */
580
if (!pass && lock->recursive
581
&& os_thread_eq(lock->writer_thread, curr_thread)) {
583
lock->lock_word -= X_LOCK_DECR;
585
/* Another thread locked before us */
589
#ifdef UNIV_SYNC_DEBUG
590
rw_lock_add_debug_info(lock, pass, RW_LOCK_EX,
593
lock->last_x_file_name = file_name;
594
lock->last_x_line = (unsigned int) line;
599
/******************************************************************//**
600
NOTE! Use the corresponding macro, not directly this function! Lock an
601
rw-lock in exclusive mode for the current thread. If the rw-lock is locked
602
in shared or exclusive mode, or there is an exclusive lock request waiting,
603
the function spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting
604
for the lock before suspending the thread. If the same thread has an x-lock
605
on the rw-lock, locking succeed, with the following exception: if pass != 0,
606
only a single x-lock may be taken on the lock. NOTE: If the same thread has
607
an s-lock, locking does not succeed! */
612
rw_lock_t* lock, /*!< in: pointer to rw-lock */
613
ulint pass, /*!< in: pass value; != 0, if the lock will
614
be passed to another thread to unlock */
615
const char* file_name,/*!< in: file name where lock requested */
616
ulint line) /*!< in: line where requested */
618
ulint index; /*!< index of the reserved wait cell */
619
ulint i; /*!< spin round count */
620
ibool spinning = FALSE;
622
ut_ad(rw_lock_validate(lock));
628
if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
629
rw_x_spin_round_count += i;
631
return; /* Locking succeeded */
637
rw_x_spin_wait_count++;
640
/* Spin waiting for the lock_word to become free */
641
while (i < SYNC_SPIN_ROUNDS
642
&& lock->lock_word <= 0) {
643
if (srv_spin_wait_delay) {
644
ut_delay(ut_rnd_interval(0,
645
srv_spin_wait_delay));
650
if (i == SYNC_SPIN_ROUNDS) {
657
rw_x_spin_round_count += i;
659
if (srv_print_latch_waits) {
661
"Thread %lu spin wait rw-x-lock at %p"
662
" cfile %s cline %lu rnds %lu\n",
663
os_thread_pf(os_thread_get_curr_id()), (void*) lock,
664
lock->cfile_name, (ulong) lock->cline, (ulong) i);
667
sync_array_reserve_cell(sync_primary_wait_array,
673
/* Waiters must be set before checking lock_word, to ensure signal
674
is sent. This could lead to a few unnecessary wake-up signals. */
675
rw_lock_set_waiter_flag(lock);
677
if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
678
sync_array_free_cell(sync_primary_wait_array, index);
679
return; /* Locking succeeded */
682
if (srv_print_latch_waits) {
684
"Thread %lu OS wait for rw-x-lock at %p"
685
" cfile %s cline %lu\n",
686
os_thread_pf(os_thread_get_curr_id()), (void*) lock,
687
lock->cfile_name, (ulong) lock->cline);
690
/* these stats may not be accurate */
691
lock->count_os_wait++;
692
rw_x_os_wait_count++;
694
sync_array_wait_event(sync_primary_wait_array, index);
700
#ifdef UNIV_SYNC_DEBUG
/******************************************************************//**
Acquires the debug mutex. We cannot use the mutex defined in sync0sync,
because the debug mutex is also acquired in sync0arr while holding the OS
mutex protecting the sync array, and the ordinary mutex_enter might
recursively call routines in sync0arr, leading to a deadlock on the OS
mutex. */
UNIV_INTERN
void
rw_lock_debug_mutex_enter(void)
/*==========================*/
{
loop:
	if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) {
		return;
	}

	/* Record the event counter before announcing ourselves as a
	waiter, so that a wake-up between the reset and the wait is
	not missed. */
	os_event_reset(rw_lock_debug_event);

	rw_lock_debug_waiters = TRUE;

	if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) {
		return;
	}

	os_event_wait(rw_lock_debug_event);

	goto loop;
}
/******************************************************************//**
731
Releases the debug mutex. */
734
rw_lock_debug_mutex_exit(void)
735
/*==========================*/
737
mutex_exit(&rw_lock_debug_mutex);
739
if (rw_lock_debug_waiters) {
740
rw_lock_debug_waiters = FALSE;
741
os_event_set(rw_lock_debug_event);
745
/******************************************************************//**
746
Inserts the debug information for an rw-lock. */
749
rw_lock_add_debug_info(
750
/*===================*/
751
rw_lock_t* lock, /*!< in: rw-lock */
752
ulint pass, /*!< in: pass value */
753
ulint lock_type, /*!< in: lock type */
754
const char* file_name, /*!< in: file where requested */
755
ulint line) /*!< in: line where requested */
757
rw_lock_debug_t* info;
762
info = rw_lock_debug_create();
764
rw_lock_debug_mutex_enter();
766
info->file_name = file_name;
768
info->lock_type = lock_type;
769
info->thread_id = os_thread_get_curr_id();
772
UT_LIST_ADD_FIRST(list, lock->debug_list, info);
774
rw_lock_debug_mutex_exit();
776
if ((pass == 0) && (lock_type != RW_LOCK_WAIT_EX)) {
777
sync_thread_add_level(lock, lock->level);
781
/******************************************************************//**
782
Removes a debug information struct for an rw-lock. */
785
rw_lock_remove_debug_info(
786
/*======================*/
787
rw_lock_t* lock, /*!< in: rw-lock */
788
ulint pass, /*!< in: pass value */
789
ulint lock_type) /*!< in: lock type */
791
rw_lock_debug_t* info;
795
if ((pass == 0) && (lock_type != RW_LOCK_WAIT_EX)) {
796
sync_thread_reset_level(lock);
799
rw_lock_debug_mutex_enter();
801
info = UT_LIST_GET_FIRST(lock->debug_list);
803
while (info != NULL) {
804
if ((pass == info->pass)
806
|| os_thread_eq(info->thread_id,
807
os_thread_get_curr_id()))
808
&& (info->lock_type == lock_type)) {
811
UT_LIST_REMOVE(list, lock->debug_list, info);
812
rw_lock_debug_mutex_exit();
814
rw_lock_debug_free(info);
819
info = UT_LIST_GET_NEXT(list, info);
824
#endif /* UNIV_SYNC_DEBUG */
826
#ifdef UNIV_SYNC_DEBUG
/******************************************************************//**
Checks if the thread has locked the rw-lock in the specified mode, with
the pass value == 0.
@return	TRUE if locked */
UNIV_INTERN
ibool
rw_lock_own(
/*========*/
	rw_lock_t*	lock,		/*!< in: rw-lock */
	ulint		lock_type)	/*!< in: lock type: RW_LOCK_SHARED,
					RW_LOCK_EX */
{
	rw_lock_debug_t*	info;

	ut_ad(lock);
	ut_ad(rw_lock_validate(lock));

	rw_lock_debug_mutex_enter();

	info = UT_LIST_GET_FIRST(lock->debug_list);

	while (info != NULL) {

		if (os_thread_eq(info->thread_id, os_thread_get_curr_id())
		    && (info->pass == 0)
		    && (info->lock_type == lock_type)) {

			rw_lock_debug_mutex_exit();
			/* Found! */

			return(TRUE);
		}

		info = UT_LIST_GET_NEXT(list, info);
	}
	rw_lock_debug_mutex_exit();

	return(FALSE);
}
#endif /* UNIV_SYNC_DEBUG */
/******************************************************************//**
869
Checks if somebody has locked the rw-lock in the specified mode.
870
@return TRUE if locked */
875
rw_lock_t* lock, /*!< in: rw-lock */
876
ulint lock_type) /*!< in: lock type: RW_LOCK_SHARED,
882
ut_ad(rw_lock_validate(lock));
884
if (lock_type == RW_LOCK_SHARED) {
885
if (rw_lock_get_reader_count(lock) > 0) {
888
} else if (lock_type == RW_LOCK_EX) {
889
if (rw_lock_get_writer(lock) == RW_LOCK_EX) {
899
#ifdef UNIV_SYNC_DEBUG
/***************************************************************//**
Prints debug info of currently locked rw-locks. */
UNIV_INTERN
void
rw_lock_list_print_info(
/*====================*/
	FILE*	file)		/*!< in: file where to print */
{
	rw_lock_t*	lock;
	ulint		count		= 0;
	rw_lock_debug_t* info;

	mutex_enter(&rw_lock_list_mutex);

	fputs("-------------\n"
	      "RW-LATCH INFO\n"
	      "-------------\n", file);

	lock = UT_LIST_GET_FIRST(rw_lock_list);

	while (lock != NULL) {

		count++;

#ifndef INNODB_RW_LOCKS_USE_ATOMICS
		mutex_enter(&(lock->mutex));
#endif
		if (lock->lock_word != X_LOCK_DECR) {

			fprintf(file, "RW-LOCK: %p ", (void*) lock);

			if (rw_lock_get_waiters(lock)) {
				fputs(" Waiters for the lock exist\n", file);
			} else {
				putc('\n', file);
			}

			info = UT_LIST_GET_FIRST(lock->debug_list);
			while (info != NULL) {
				rw_lock_debug_print(info);
				info = UT_LIST_GET_NEXT(list, info);
			}
		}
#ifndef INNODB_RW_LOCKS_USE_ATOMICS
		mutex_exit(&(lock->mutex));
#endif

		lock = UT_LIST_GET_NEXT(list, lock);
	}

	fprintf(file, "Total number of rw-locks %ld\n", count);
	mutex_exit(&rw_lock_list_mutex);
}
/***************************************************************//**
955
Prints debug info of an rw-lock. */
960
rw_lock_t* lock) /*!< in: rw-lock */
962
rw_lock_debug_t* info;
967
"RW-LATCH: %p ", (void*) lock);
969
#ifndef INNODB_RW_LOCKS_USE_ATOMICS
970
/* We used to acquire lock->mutex here, but it would cause a
971
recursive call to sync_thread_add_level() if UNIV_SYNC_DEBUG
972
is defined. Since this function is only invoked from
973
sync_thread_levels_g(), let us choose the smaller evil:
974
performing dirty reads instead of causing bogus deadlocks or
975
assertion failures. */
977
if (lock->lock_word != X_LOCK_DECR) {
979
if (rw_lock_get_waiters(lock)) {
980
fputs(" Waiters for the lock exist\n", stderr);
985
info = UT_LIST_GET_FIRST(lock->debug_list);
986
while (info != NULL) {
987
rw_lock_debug_print(info);
988
info = UT_LIST_GET_NEXT(list, info);
993
/*********************************************************************//**
994
Prints info of a debug struct. */
999
rw_lock_debug_t* info) /*!< in: debug struct */
1003
rwt = info->lock_type;
1005
fprintf(stderr, "Locked: thread %lu file %s line %lu ",
1006
(ulong) os_thread_pf(info->thread_id), info->file_name,
1007
(ulong) info->line);
1008
if (rwt == RW_LOCK_SHARED) {
1009
fputs("S-LOCK", stderr);
1010
} else if (rwt == RW_LOCK_EX) {
1011
fputs("X-LOCK", stderr);
1012
} else if (rwt == RW_LOCK_WAIT_EX) {
1013
fputs("WAIT X-LOCK", stderr);
1017
if (info->pass != 0) {
1018
fprintf(stderr, " pass value %lu", (ulong) info->pass);
1023
/***************************************************************//**
1024
Returns the number of currently locked rw-locks. Works only in the debug
1026
@return number of locked rw-locks */
1029
rw_lock_n_locked(void)
1030
/*==================*/
1035
mutex_enter(&rw_lock_list_mutex);
1037
lock = UT_LIST_GET_FIRST(rw_lock_list);
1039
while (lock != NULL) {
1041
if (lock->lock_word != X_LOCK_DECR) {
1045
lock = UT_LIST_GET_NEXT(list, lock);
1048
mutex_exit(&rw_lock_list_mutex);
1052
#endif /* UNIV_SYNC_DEBUG */