/*****************************************************************************

Copyright (C) 1995, 2009, Innobase Oy. All Rights Reserved.
Copyright (C) 2008, Google Inc.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
St, Fifth Floor, Boston, MA 02110-1301 USA

*****************************************************************************/

/**************************************************//**
@file include/sync0rw.ic
The read-write lock (for threads)

Created 9/11/1995 Heikki Tuuri
*******************************************************/

/******************************************************************//**
Lock an rw-lock in shared mode for the current thread. If the rw-lock is
locked in exclusive mode, or there is an exclusive lock request waiting,
the function spins a preset time (controlled by SYNC_SPIN_ROUNDS),
waiting for the lock before suspending the thread. */
UNIV_INTERN
void
rw_lock_s_lock_spin(
/*================*/
        rw_lock_t*      lock,   /*!< in: pointer to rw-lock */
        ulint           pass,   /*!< in: pass value; != 0, if the lock will
                                be passed to another thread to unlock */
        const char*     file_name,/*!< in: file name where lock requested */
        ulint           line);  /*!< in: line where requested */
#ifdef UNIV_SYNC_DEBUG
/******************************************************************//**
Inserts the debug information for an rw-lock. */
UNIV_INTERN
void
rw_lock_add_debug_info(
/*===================*/
        rw_lock_t*      lock,           /*!< in: rw-lock */
        ulint           pass,           /*!< in: pass value */
        ulint           lock_type,      /*!< in: lock type */
        const char*     file_name,      /*!< in: file where requested */
        ulint           line);          /*!< in: line where requested */
/******************************************************************//**
Removes a debug information struct for an rw-lock. */
UNIV_INTERN
void
rw_lock_remove_debug_info(
/*======================*/
        rw_lock_t*      lock,           /*!< in: rw-lock */
        ulint           pass,           /*!< in: pass value */
        ulint           lock_type);     /*!< in: lock type */
#endif /* UNIV_SYNC_DEBUG */

/********************************************************************//**
Check if there are threads waiting for the rw-lock.
@return 1 if waiters, 0 otherwise */
UNIV_INLINE
ulint
rw_lock_get_waiters(
/*================*/
        const rw_lock_t*        lock)   /*!< in: rw-lock */
{
        return(lock->waiters);
}

/********************************************************************//**
Sets lock->waiters to 1. It is not an error if lock->waiters is already
1. On platforms where ATOMIC builtins are used this function enforces a
memory barrier. */
UNIV_INLINE
void
rw_lock_set_waiter_flag(
/*====================*/
        rw_lock_t*      lock)   /*!< in/out: rw-lock */
{
#ifdef INNODB_RW_LOCKS_USE_ATOMICS
        if (!os_compare_and_swap_ulint(&lock->waiters, 0, 1)) {
                /* The flag was already set: this is not an error. */
        }
#else /* INNODB_RW_LOCKS_USE_ATOMICS */
        lock->waiters = 1;
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
}

/********************************************************************//**
Resets lock->waiters to 0. It is not an error if lock->waiters is already
0. On platforms where ATOMIC builtins are used this function enforces a
memory barrier. */
UNIV_INLINE
void
rw_lock_reset_waiter_flag(
/*======================*/
        rw_lock_t*      lock)   /*!< in/out: rw-lock */
{
#ifdef INNODB_RW_LOCKS_USE_ATOMICS
        if (!os_compare_and_swap_ulint(&lock->waiters, 1, 0)) {
                /* The flag was already reset: this is not an error. */
        }
#else /* INNODB_RW_LOCKS_USE_ATOMICS */
        lock->waiters = 0;
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
}

/******************************************************************//**
Returns the write-status of the lock - this function made more sense
with the old rw_lock implementation.
@return RW_LOCK_NOT_LOCKED, RW_LOCK_EX, RW_LOCK_WAIT_EX */
UNIV_INLINE
ulint
rw_lock_get_writer(
/*===============*/
        const rw_lock_t*        lock)   /*!< in: rw-lock */
{
        lint lock_word = lock->lock_word;
        if (lock_word > 0) {
                /* return NOT_LOCKED in s-lock state, like the writer
                member of the old lock implementation. */
                return(RW_LOCK_NOT_LOCKED);
        } else if (((-lock_word) % X_LOCK_DECR) == 0) {
                return(RW_LOCK_EX);
        } else {
                ut_ad(lock_word > -X_LOCK_DECR);
                return(RW_LOCK_WAIT_EX);
        }
}

/******************************************************************//**
Returns the number of readers.
@return number of readers */
UNIV_INLINE
ulint
rw_lock_get_reader_count(
/*=====================*/
        const rw_lock_t*        lock)   /*!< in: rw-lock */
{
        lint lock_word = lock->lock_word;
        if (lock_word > 0) {
                /* s-locked, no x-waiters */
                return(X_LOCK_DECR - lock_word);
        } else if (lock_word < 0 && lock_word > -X_LOCK_DECR) {
                /* s-locked, with x-waiters */
                return((ulint)(-lock_word));
        }
        return(0);
}

#ifndef INNODB_RW_LOCKS_USE_ATOMICS
UNIV_INLINE
mutex_t*
rw_lock_get_mutex(
/*==============*/
        rw_lock_t*      lock)
{
        return(&(lock->mutex));
}
#endif /* !INNODB_RW_LOCKS_USE_ATOMICS */

/******************************************************************//**
Returns the value of writer_count for the lock. Does not reserve the lock
mutex, so the caller must be sure it is not changed during the call.
@return value of writer_count */
UNIV_INLINE
ulint
rw_lock_get_x_lock_count(
/*=====================*/
        const rw_lock_t*        lock)   /*!< in: rw-lock */
{
        lint lock_copy = lock->lock_word;
        /* If there is a reader, lock_word is not divisible by X_LOCK_DECR */
        if (lock_copy > 0 || (-lock_copy) % X_LOCK_DECR != 0) {
                return(0);
        }
        return(((-lock_copy) / X_LOCK_DECR) + 1);
}

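/* Illustrative sketch (not part of the original file): how a lock_word value
decodes into lock state, following the three accessors above. It assumes
X_LOCK_DECR == 0x00100000, the value defined in sync0rw.h, and the helper
decode_lock_word() is hypothetical, written only for this illustration.
Guarded by #if 0 so it is never compiled into the server. */
#if 0
#include <stdio.h>

#define EX_LOCK_DECR    0x00100000L     /* assumed value of X_LOCK_DECR */

/* Decode a lock_word value the same way rw_lock_get_writer(),
rw_lock_get_reader_count() and rw_lock_get_x_lock_count() do. */
static void
decode_lock_word(long lock_word)
{
        if (lock_word == EX_LOCK_DECR) {
                printf("%ld: unlocked\n", lock_word);
        } else if (lock_word > 0) {
                printf("%ld: s-locked by %ld readers\n",
                       lock_word, EX_LOCK_DECR - lock_word);
        } else if ((-lock_word) % EX_LOCK_DECR == 0) {
                printf("%ld: x-locked, recursion depth %ld\n",
                       lock_word, (-lock_word) / EX_LOCK_DECR + 1);
        } else {
                printf("%ld: wait_ex: writer waiting for %ld readers\n",
                       lock_word, (-lock_word) % EX_LOCK_DECR);
        }
}

int
main(void)
{
        decode_lock_word(0x00100000L);          /* free */
        decode_lock_word(0x00100000L - 3);      /* 3 readers */
        decode_lock_word(0);                    /* one x-lock */
        decode_lock_word(-0x00100000L);         /* x-lock, relocked once */
        decode_lock_word(-2);                   /* wait_ex with 2 readers */
        return(0);
}
#endif
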
/******************************************************************//**
Two different implementations for decrementing the lock_word of a rw_lock:
one for systems supporting atomic operations, one for others. This does
not support recursive x-locks: they should be handled by the caller and
need not be atomic since they are performed by the current lock holder.
Returns true if the decrement was made, false if not.
@return TRUE if decr occurs */
UNIV_INLINE
ibool
rw_lock_lock_word_decr(
/*===================*/
        rw_lock_t*      lock,   /*!< in/out: rw-lock */
        ulint           amount) /*!< in: amount to decrement */
{
#ifdef INNODB_RW_LOCKS_USE_ATOMICS
        lint local_lock_word = lock->lock_word;
        while (local_lock_word > 0) {
                if (os_compare_and_swap_lint(&lock->lock_word,
                                             local_lock_word,
                                             local_lock_word - amount)) {
                        return(TRUE);
                }
                local_lock_word = lock->lock_word;
        }
        return(FALSE);
#else /* INNODB_RW_LOCKS_USE_ATOMICS */
        ibool success = FALSE;
        mutex_enter(&(lock->mutex));
        if (lock->lock_word > 0) {
                lock->lock_word -= amount;
                success = TRUE;
        }
        mutex_exit(&(lock->mutex));
        return(success);
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
}

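/* Illustrative sketch (not part of the original file): the compare-and-swap
retry pattern used above, written against the GCC __sync builtin that
os_compare_and_swap_lint typically maps to on GCC builds (an assumption; see
os0sync.h for the actual mapping). The helper word_decr_sketch() is
hypothetical. Guarded by #if 0 so it is never compiled. */
#if 0
/* Try to take 'amount' from *word as long as the value stays positive;
give up as soon as a writer (or wait_ex writer) has made it <= 0. */
static int
word_decr_sketch(volatile long* word, long amount)
{
        long    local = *word;

        while (local > 0) {
                if (__sync_bool_compare_and_swap(word, local,
                                                 local - amount)) {
                        return(1);      /* we made the decrement */
                }
                /* Somebody else changed *word between our read and the
                CAS: reread and retry. */
                local = *word;
        }

        return(0);      /* word was <= 0: do not decrement */
}
#endif
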
/******************************************************************//**
Increments lock_word by the specified amount and returns the new value.
@return lock->lock_word after increment */
UNIV_INLINE
lint
rw_lock_lock_word_incr(
/*===================*/
        rw_lock_t*      lock,   /*!< in/out: rw-lock */
        ulint           amount) /*!< in: amount of increment */
{
#ifdef INNODB_RW_LOCKS_USE_ATOMICS
        return(os_atomic_increment_lint(&lock->lock_word, amount));
#else /* INNODB_RW_LOCKS_USE_ATOMICS */
        lint local_lock_word;

        mutex_enter(&(lock->mutex));

        lock->lock_word += amount;
        local_lock_word = lock->lock_word;

        mutex_exit(&(lock->mutex));

        return(local_lock_word);
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
}

/******************************************************************//**
This function sets the lock->writer_thread and lock->recursive fields.
For platforms where we are using atomic builtins instead of lock->mutex
it sets the lock->writer_thread field using atomics to ensure memory
ordering. Note that it is assumed that the caller of this function
effectively owns the lock i.e.: nobody else is allowed to modify
lock->writer_thread at this point in time.
The protocol is that lock->writer_thread MUST be updated BEFORE the
lock->recursive flag is set. */
UNIV_INLINE
void
rw_lock_set_writer_id_and_recursion_flag(
/*=====================================*/
        rw_lock_t*      lock,           /*!< in/out: lock to work on */
        ibool           recursive)      /*!< in: TRUE if recursion
                                        allowed */
{
        os_thread_id_t  curr_thread     = os_thread_get_curr_id();

#ifdef INNODB_RW_LOCKS_USE_ATOMICS
        os_thread_id_t  local_thread;
        ibool           success;

        /* Prevent Valgrind warnings about writer_thread being
        uninitialized. It does not matter if writer_thread is
        uninitialized, because we are comparing writer_thread against
        itself, and the operation should always succeed. */
        UNIV_MEM_VALID(&lock->writer_thread, sizeof lock->writer_thread);

        local_thread = lock->writer_thread;
        success = os_compare_and_swap_thread_id(
                &lock->writer_thread, local_thread, curr_thread);
        ut_a(success);
        lock->recursive = recursive;

#else /* INNODB_RW_LOCKS_USE_ATOMICS */

        mutex_enter(&lock->mutex);
        lock->writer_thread = curr_thread;
        lock->recursive = recursive;
        mutex_exit(&lock->mutex);

#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
}

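/* Illustrative sketch (not part of the original file): why the update order
above matters. The relock path in rw_lock_x_lock_func_nowait() below reads
the two fields in the opposite order, so writer_thread is only trusted once
recursive is already TRUE. The helper belongs_to_me_sketch() is hypothetical
and only restates that read order. Guarded by #if 0 so it is never compiled. */
#if 0
static ibool
belongs_to_me_sketch(const rw_lock_t* lock)
{
        /* Read recursive first: if it is TRUE, writer_thread was stored
        before the flag (see the protocol above) and is therefore valid. */
        if (!lock->recursive) {
                return(FALSE);          /* writer_thread may be stale */
        }

        return(os_thread_eq(lock->writer_thread,
                            os_thread_get_curr_id()));
}
#endif
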
/******************************************************************//**
Low-level function which tries to lock an rw-lock in s-mode. Performs no
spinning.
@return TRUE if success */
UNIV_INLINE
ibool
rw_lock_s_lock_low(
/*===============*/
        rw_lock_t*      lock,   /*!< in: pointer to rw-lock */
        ulint           pass __attribute__((unused)),
                                /*!< in: pass value; != 0, if the lock will be
                                passed to another thread to unlock */
        const char*     file_name, /*!< in: file name where lock requested */
        ulint           line)   /*!< in: line where requested */
{
        /* TODO: study performance of UNIV_LIKELY branch prediction hints. */
        if (!rw_lock_lock_word_decr(lock, 1)) {
                /* Locking did not succeed */
                return(FALSE);
        }

#ifdef UNIV_SYNC_DEBUG
        rw_lock_add_debug_info(lock, pass, RW_LOCK_SHARED, file_name, line);
#endif
        /* These debugging values are not set safely: they may be incorrect
        or even refer to a line that is invalid for the file name. */
        lock->last_s_file_name = file_name;
        lock->last_s_line = line;

        return(TRUE);   /* locking succeeded */
}

/******************************************************************//**
Low-level function which locks an rw-lock in s-mode when we know that it
is possible and no one else is currently accessing the rw-lock structure.
Then we can do the locking without reserving the mutex. */
UNIV_INLINE
void
rw_lock_s_lock_direct(
/*==================*/
        rw_lock_t*      lock,           /*!< in/out: rw-lock */
        const char*     file_name,      /*!< in: file name where requested */
        ulint           line)           /*!< in: line where lock requested */
{
        ut_ad(lock->lock_word == X_LOCK_DECR);

        /* Indicate there is a new reader by decrementing lock_word */
        lock->lock_word--;

        lock->last_s_file_name = file_name;
        lock->last_s_line = line;

#ifdef UNIV_SYNC_DEBUG
        rw_lock_add_debug_info(lock, 0, RW_LOCK_SHARED, file_name, line);
#endif /* UNIV_SYNC_DEBUG */
}

/******************************************************************//**
Low-level function which locks an rw-lock in x-mode when we know that it
is not locked and no one else is currently accessing the rw-lock structure.
Then we can do the locking without reserving the mutex. */
UNIV_INLINE
void
rw_lock_x_lock_direct(
/*==================*/
        rw_lock_t*      lock,           /*!< in/out: rw-lock */
        const char*     file_name,      /*!< in: file name where requested */
        ulint           line)           /*!< in: line where lock requested */
{
        ut_ad(rw_lock_validate(lock));
        ut_ad(lock->lock_word == X_LOCK_DECR);

        lock->lock_word -= X_LOCK_DECR;
        lock->writer_thread = os_thread_get_curr_id();
        lock->recursive = TRUE;

        lock->last_x_file_name = file_name;
        lock->last_x_line = line;

#ifdef UNIV_SYNC_DEBUG
        rw_lock_add_debug_info(lock, 0, RW_LOCK_EX, file_name, line);
#endif /* UNIV_SYNC_DEBUG */
}

/******************************************************************//**
NOTE! Use the corresponding macro, not directly this function! Lock an
rw-lock in shared mode for the current thread. If the rw-lock is locked
in exclusive mode, or there is an exclusive lock request waiting, the
function spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting for
the lock, before suspending the thread. */
UNIV_INLINE
void
rw_lock_s_lock_func(
/*================*/
        rw_lock_t*      lock,   /*!< in: pointer to rw-lock */
        ulint           pass,   /*!< in: pass value; != 0, if the lock will
                                be passed to another thread to unlock */
        const char*     file_name,/*!< in: file name where lock requested */
        ulint           line)   /*!< in: line where requested */
{
        /* NOTE: As we do not know the thread ids for threads which have
        s-locked a latch, and s-lockers will be served only after waiting
        x-lock requests have been fulfilled, then if this thread already
        owns an s-lock here, it may end up in a deadlock with another thread
        which requests an x-lock here. Therefore, we will forbid recursive
        s-locking of a latch: the following assert will warn the programmer
        of the possibility of this kind of a deadlock. If we want to implement
        safe recursive s-locking, we should keep in a list the thread ids of
        the threads which have s-locked a latch. This would use some CPU
        time. */

#ifdef UNIV_SYNC_DEBUG
        ut_ad(!rw_lock_own(lock, RW_LOCK_SHARED)); /* see NOTE above */
#endif /* UNIV_SYNC_DEBUG */

        /* TODO: study performance of UNIV_LIKELY branch prediction hints. */
        if (rw_lock_s_lock_low(lock, pass, file_name, line)) {

                return; /* Success */
        } else {
                /* Did not succeed, try spin wait */

                rw_lock_s_lock_spin(lock, pass, file_name, line);

                return;
        }
}

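/* Illustrative sketch (not part of the original file): typical caller-side
use of the shared-mode lock through the rw_lock_s_lock() and rw_lock_s_unlock()
macros from sync0rw.h, which wrap the *_func() variants and supply __FILE__
and __LINE__. The helper read_something_sketch() is hypothetical. Guarded by
#if 0 so it is never compiled. */
#if 0
static void
read_something_sketch(rw_lock_t* latch)
{
        rw_lock_s_lock(latch);          /* may spin, then wait on the
                                        sync array if contended */

        /* ... read the structure protected by 'latch' ... */

        rw_lock_s_unlock(latch);        /* wakes a wait_ex writer if we
                                        were the last reader */
}
#endif
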
/******************************************************************//**
NOTE! Use the corresponding macro, not directly this function! Lock an
rw-lock in exclusive mode for the current thread if the lock can be
obtained immediately.
@return TRUE if success */
UNIV_INLINE
ibool
rw_lock_x_lock_func_nowait(
/*=======================*/
        rw_lock_t*      lock,   /*!< in: pointer to rw-lock */
        const char*     file_name,/*!< in: file name where lock requested */
        ulint           line)   /*!< in: line where requested */
{
        os_thread_id_t  curr_thread     = os_thread_get_curr_id();

        ibool success;

#ifdef INNODB_RW_LOCKS_USE_ATOMICS
        success = os_compare_and_swap_lint(&lock->lock_word, X_LOCK_DECR, 0);
#else /* INNODB_RW_LOCKS_USE_ATOMICS */

        success = FALSE;
        mutex_enter(&(lock->mutex));
        if (lock->lock_word == X_LOCK_DECR) {
                lock->lock_word = 0;
                success = TRUE;
        }
        mutex_exit(&(lock->mutex));

#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
        if (success) {
                rw_lock_set_writer_id_and_recursion_flag(lock, TRUE);

        } else if (lock->recursive
                   && os_thread_eq(lock->writer_thread, curr_thread)) {
                /* Relock: this lock_word modification is safe since no other
                threads can modify (lock, unlock, or reserve) lock_word while
                there is an exclusive writer and this is the writer thread. */
                lock->lock_word -= X_LOCK_DECR;

                /* Recursive x-locks must be multiples of X_LOCK_DECR. */
                ut_ad(((-lock->lock_word) % X_LOCK_DECR) == 0);

        } else {
                /* Failure */
                return(FALSE);
        }
#ifdef UNIV_SYNC_DEBUG
        rw_lock_add_debug_info(lock, 0, RW_LOCK_EX, file_name, line);
#endif

        lock->last_x_file_name = file_name;
        lock->last_x_line = line;

        ut_ad(rw_lock_validate(lock));

        return(TRUE);
}

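/* Illustrative sketch (not part of the original file): conditional use of
the nowait x-lock. The call goes through the function's documented signature
with __FILE__/__LINE__; production code would normally use the corresponding
macro from sync0rw.h, and rw_lock_x_unlock() is assumed to be that header's
release macro. The helper try_modify_sketch() is hypothetical. Guarded by
#if 0 so it is never compiled. */
#if 0
static ibool
try_modify_sketch(rw_lock_t* latch)
{
        if (!rw_lock_x_lock_func_nowait(latch, __FILE__, __LINE__)) {

                return(FALSE);  /* somebody else holds the latch:
                                fall back or retry later */
        }

        /* ... modify the structure protected by 'latch' ... */

        rw_lock_x_unlock(latch);

        return(TRUE);
}
#endif
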
/******************************************************************//**
Releases a shared mode lock. */
UNIV_INLINE
void
rw_lock_s_unlock_func(
/*==================*/
#ifdef UNIV_SYNC_DEBUG
        ulint           pass,   /*!< in: pass value; != 0, if the lock may have
                                been passed to another thread to unlock */
#endif
        rw_lock_t*      lock)   /*!< in/out: rw-lock */
{
        ut_ad((lock->lock_word % X_LOCK_DECR) != 0);

#ifdef UNIV_SYNC_DEBUG
        rw_lock_remove_debug_info(lock, pass, RW_LOCK_SHARED);
#endif

        /* Increment lock_word to indicate 1 less reader */
        if (rw_lock_lock_word_incr(lock, 1) == 0) {

                /* wait_ex waiter exists. It may not be asleep, but we signal
                anyway. We do not wake other waiters, because they can't
                exist without wait_ex waiter and wait_ex waiter goes first.*/
                os_event_set(lock->wait_ex_event);
                sync_array_object_signalled(sync_primary_wait_array);
        }

        ut_ad(rw_lock_validate(lock));

#ifdef UNIV_SYNC_PERF_STAT
        rw_s_exit_count++;
#endif /* UNIV_SYNC_PERF_STAT */
}

/******************************************************************//**
Releases a shared mode lock when we know there are no waiters and no one
else will access the lock during the time this function is executed. */
UNIV_INLINE
void
rw_lock_s_unlock_direct(
/*====================*/
        rw_lock_t*      lock)   /*!< in/out: rw-lock */
{
        ut_ad(lock->lock_word < X_LOCK_DECR);

#ifdef UNIV_SYNC_DEBUG
        rw_lock_remove_debug_info(lock, 0, RW_LOCK_SHARED);
#endif

        /* Decrease reader count by incrementing lock_word */
        lock->lock_word++;

        ut_ad(!lock->waiters);
        ut_ad(rw_lock_validate(lock));
#ifdef UNIV_SYNC_PERF_STAT
        rw_s_exit_count++;
#endif /* UNIV_SYNC_PERF_STAT */
}

/******************************************************************//**
Releases an exclusive mode lock. */
UNIV_INLINE
void
rw_lock_x_unlock_func(
/*==================*/
#ifdef UNIV_SYNC_DEBUG
        ulint           pass,   /*!< in: pass value; != 0, if the lock may have
                                been passed to another thread to unlock */
#endif
        rw_lock_t*      lock)   /*!< in/out: rw-lock */
{
        ut_ad((lock->lock_word % X_LOCK_DECR) == 0);

        /* lock->recursive flag also indicates if lock->writer_thread is
        valid or stale. If we are the last of the recursive callers
        then we must unset lock->recursive flag to indicate that the
        lock->writer_thread is now stale.
        Note that since we still hold the x-lock we can safely read the
        lock_word. */
        if (lock->lock_word == 0) {
                /* Last caller in a possible recursive chain. */
                lock->recursive = FALSE;
                UNIV_MEM_INVALID(&lock->writer_thread,
                                 sizeof lock->writer_thread);
        }

#ifdef UNIV_SYNC_DEBUG
        rw_lock_remove_debug_info(lock, pass, RW_LOCK_EX);
#endif

        if (rw_lock_lock_word_incr(lock, X_LOCK_DECR) == X_LOCK_DECR) {
                /* Lock is now free. May have to signal read/write waiters.
                We do not need to signal wait_ex waiters, since they cannot
                exist when there is a writer. */
                if (lock->waiters) {
                        rw_lock_reset_waiter_flag(lock);
                        os_event_set(lock->event);
                        sync_array_object_signalled(sync_primary_wait_array);
                }
        }

        ut_ad(rw_lock_validate(lock));

#ifdef UNIV_SYNC_PERF_STAT
        rw_x_exit_count++;
#endif /* UNIV_SYNC_PERF_STAT */
}

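/* Illustrative sketch (not part of the original file): recursive x-locking
as seen through lock_word. Each x-lock or relock subtracts X_LOCK_DECR and
each call of rw_lock_x_unlock_func() adds it back, so the lock only becomes
free again after a matching number of unlocks. It assumes the rw_lock_x_lock()
and rw_lock_x_unlock() macros from sync0rw.h; recursive_x_sketch() is
hypothetical. Guarded by #if 0 so it is never compiled. */
#if 0
static void
recursive_x_sketch(rw_lock_t* latch)
{
        rw_lock_x_lock(latch);          /* lock_word: X_LOCK_DECR -> 0 */
        rw_lock_x_lock(latch);          /* relock:    0 -> -X_LOCK_DECR */

        rw_lock_x_unlock(latch);        /* -X_LOCK_DECR -> 0, still ours */
        rw_lock_x_unlock(latch);        /* 0 -> X_LOCK_DECR, now free */
}
#endif
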
/******************************************************************//**
Releases an exclusive mode lock when we know there are no waiters, and
no one else will access the lock during the time this function is executed. */
UNIV_INLINE
void
rw_lock_x_unlock_direct(
/*====================*/
        rw_lock_t*      lock)   /*!< in/out: rw-lock */
{
        /* Reset the exclusive lock if this thread no longer has an x-mode
        lock */

        ut_ad((lock->lock_word % X_LOCK_DECR) == 0);

#ifdef UNIV_SYNC_DEBUG
        rw_lock_remove_debug_info(lock, 0, RW_LOCK_EX);
#endif

        if (lock->lock_word == 0) {
                lock->recursive = FALSE;
                UNIV_MEM_INVALID(&lock->writer_thread,
                                 sizeof lock->writer_thread);
        }

        lock->lock_word += X_LOCK_DECR;

        ut_ad(!lock->waiters);
        ut_ad(rw_lock_validate(lock));

#ifdef UNIV_SYNC_PERF_STAT
        rw_x_exit_count++;
#endif /* UNIV_SYNC_PERF_STAT */
}

#ifdef UNIV_PFS_RWLOCK

/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_create_func().
NOTE! Please use the corresponding macro rw_lock_create(), not directly
this function! */
UNIV_INLINE
void
pfs_rw_lock_create_func(
/*====================*/
        mysql_pfs_key_t key,            /*!< in: key registered with
                                        performance schema */
        rw_lock_t*      lock,           /*!< in: pointer to memory */
# ifdef UNIV_DEBUG
# ifdef UNIV_SYNC_DEBUG
        ulint           level,          /*!< in: level */
# endif /* UNIV_SYNC_DEBUG */
        const char*     cmutex_name,    /*!< in: mutex name */
# endif /* UNIV_DEBUG */
        const char*     cfile_name,     /*!< in: file name where created */
        ulint           cline)          /*!< in: file line where created */
{
        /* Initialize the rwlock for performance schema */
        lock->pfs_psi = (PSI_server && PFS_IS_INSTRUMENTED(key))
                                ? PSI_server->init_rwlock(key, lock)
                                : NULL;

        /* The actual function to initialize an rwlock */
        rw_lock_create_func(lock,
# ifdef UNIV_DEBUG
# ifdef UNIV_SYNC_DEBUG
                            level,
# endif /* UNIV_SYNC_DEBUG */
                            cmutex_name,
# endif /* UNIV_DEBUG */
                            cfile_name,
                            cline);
}

/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_x_lock_func()
NOTE! Please use the corresponding macro rw_lock_x_lock(), not directly
this function! */
UNIV_INLINE
void
pfs_rw_lock_x_lock_func(
/*====================*/
        rw_lock_t*      lock,   /*!< in: pointer to rw-lock */
        ulint           pass,   /*!< in: pass value; != 0, if the lock will
                                be passed to another thread to unlock */
        const char*     file_name,/*!< in: file name where lock requested */
        ulint           line)   /*!< in: line where requested */
{
        struct PSI_rwlock_locker*       locker = NULL;
        PSI_rwlock_locker_state         state;

        /* Record the entry of rw x lock request in performance schema */
        if (UNIV_LIKELY(PSI_server && lock->pfs_psi)) {
                locker = PSI_server->get_thread_rwlock_locker(
                        &state, lock->pfs_psi, PSI_RWLOCK_WRITELOCK);

                if (locker) {
                        PSI_server->start_rwlock_wrwait(locker,
                                                        file_name, line);
                }
        }

        rw_lock_x_lock_func(lock, pass, file_name, line);

        if (locker) {
                PSI_server->end_rwlock_wrwait(locker, 0);
        }
}

/******************************************************************//**
Performance schema instrumented wrap function for
rw_lock_x_lock_func_nowait()
NOTE! Please use the corresponding macro rw_lock_x_lock_nowait(),
not directly this function!
@return TRUE if success */
UNIV_INLINE
ibool
pfs_rw_lock_x_lock_func_nowait(
/*===========================*/
        rw_lock_t*      lock,   /*!< in: pointer to rw-lock */
        const char*     file_name,/*!< in: file name where lock
                                requested */
        ulint           line)   /*!< in: line where requested */
{
        struct PSI_rwlock_locker*       locker = NULL;
        PSI_rwlock_locker_state         state;
        ibool           ret;

        /* Record the entry of rw x lock request in performance schema */
        if (UNIV_LIKELY(PSI_server && lock->pfs_psi)) {
                locker = PSI_server->get_thread_rwlock_locker(
                        &state, lock->pfs_psi, PSI_RWLOCK_WRITELOCK);

                if (locker) {
                        PSI_server->start_rwlock_wrwait(locker,
                                                        file_name, line);
                }
        }

        ret = rw_lock_x_lock_func_nowait(lock, file_name, line);

        if (locker) {
                PSI_server->end_rwlock_wrwait(locker, 0);
        }

        return(ret);
}

/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_free_func()
NOTE! Please use the corresponding macro rw_lock_free(), not directly
this function! */
UNIV_INLINE
void
pfs_rw_lock_free_func(
/*==================*/
        rw_lock_t*      lock)   /*!< in: pointer to rw-lock */
{
        if (UNIV_LIKELY(PSI_server && lock->pfs_psi)) {
                PSI_server->destroy_rwlock(lock->pfs_psi);
                lock->pfs_psi = NULL;
        }

        rw_lock_free_func(lock);
}

/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_s_lock_func()
NOTE! Please use the corresponding macro rw_lock_s_lock(), not
directly this function! */
UNIV_INLINE
void
pfs_rw_lock_s_lock_func(
/*====================*/
        rw_lock_t*      lock,   /*!< in: pointer to rw-lock */
        ulint           pass,   /*!< in: pass value; != 0, if the
                                lock will be passed to another
                                thread to unlock */
        const char*     file_name,/*!< in: file name where lock
                                requested */
        ulint           line)   /*!< in: line where requested */
{
        struct PSI_rwlock_locker*       locker = NULL;
        PSI_rwlock_locker_state         state;

        /* Instrumented to inform we are acquiring a shared rwlock */
        if (UNIV_LIKELY(PSI_server && lock->pfs_psi)) {
                locker = PSI_server->get_thread_rwlock_locker(
                        &state, lock->pfs_psi, PSI_RWLOCK_READLOCK);

                if (locker) {
                        PSI_server->start_rwlock_rdwait(locker,
                                                        file_name, line);
                }
        }

        rw_lock_s_lock_func(lock, pass, file_name, line);

        if (locker) {
                PSI_server->end_rwlock_rdwait(locker, 0);
        }
}

/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_s_lock_low()
NOTE! Please use the corresponding macro rw_lock_s_lock(), not
directly this function!
@return TRUE if success */
UNIV_INLINE
ibool
pfs_rw_lock_s_lock_low(
/*===================*/
        rw_lock_t*      lock,   /*!< in: pointer to rw-lock */
        ulint           pass,   /*!< in: pass value; != 0, if the
                                lock will be passed to another
                                thread to unlock */
        const char*     file_name, /*!< in: file name where lock requested */
        ulint           line)   /*!< in: line where requested */
{
        struct PSI_rwlock_locker*       locker = NULL;
        PSI_rwlock_locker_state         state;
        ibool           ret;

        /* Instrumented to inform we are acquiring a shared rwlock */
        if (UNIV_LIKELY(PSI_server && lock->pfs_psi)) {
                locker = PSI_server->get_thread_rwlock_locker(
                        &state, lock->pfs_psi, PSI_RWLOCK_READLOCK);

                if (locker) {
                        PSI_server->start_rwlock_rdwait(locker,
                                                        file_name, line);
                }
        }

        ret = rw_lock_s_lock_low(lock, pass, file_name, line);

        if (locker) {
                PSI_server->end_rwlock_rdwait(locker, 0);
        }

        return(ret);
}

/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_x_unlock_func()
NOTE! Please use the corresponding macro rw_lock_x_unlock(), not directly
this function! */
UNIV_INLINE
void
pfs_rw_lock_x_unlock_func(
/*======================*/
#ifdef UNIV_SYNC_DEBUG
        ulint           pass,   /*!< in: pass value; != 0, if the
                                lock may have been passed to another
                                thread to unlock */
#endif
        rw_lock_t*      lock)   /*!< in/out: rw-lock */
{
        /* Inform performance schema we are unlocking the lock */
        if (UNIV_LIKELY(PSI_server && lock->pfs_psi)) {
                PSI_server->unlock_rwlock(lock->pfs_psi);
        }

        rw_lock_x_unlock_func(
#ifdef UNIV_SYNC_DEBUG
                pass,
#endif
                lock);
}

/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_s_unlock_func()
NOTE! Please use the corresponding macro rw_lock_s_unlock(), not
directly this function! */
UNIV_INLINE
void
pfs_rw_lock_s_unlock_func(
/*======================*/
#ifdef UNIV_SYNC_DEBUG
        ulint           pass,   /*!< in: pass value; != 0, if the
                                lock may have been passed to another
                                thread to unlock */
#endif
        rw_lock_t*      lock)   /*!< in/out: rw-lock */
{
        /* Inform performance schema we are unlocking the lock */
        if (UNIV_LIKELY(PSI_server && lock->pfs_psi)) {
                PSI_server->unlock_rwlock(lock->pfs_psi);
        }

        rw_lock_s_unlock_func(
#ifdef UNIV_SYNC_DEBUG
                pass,
#endif
                lock);
}
#endif /* UNIV_PFS_RWLOCK */