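	/* Exclusive-lock path: record this thread as the X locker, then
	 * spin until every current reader has entered the wait state.
	 */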
	sxs->sxs_locker = thd_id;

	/* Wait for all the readers to wait! */
	while (sxs->sxs_wait_count < sxs->sxs_rlock_count) {
		sxs->sxs_xwaiter = 1;
		/* This should not be required, because there is only one thread
		 * accessing this value. However, the lock fails if this
		 * is not done with an atomic op.
		 *
		 * This is because threads on other processors have the
		 * value in processor cache. So they do not
		 * notice that the value has been set to zero.
		 * They think it is still 1 and march through
		 * the barrier (sxs->sxs_xwaiter < sxs->sxs_xlocked) below.
		 *
		 * In the meantime, this X locker has gone on thinking
		 * all is OK.
		 */
		xt_atomic_tas2(&sxs->sxs_xwaiter, 0);
	}

#ifdef XT_THREAD_LOCK_INFO
	xt_thread_lock_info_add_owner(&sxs->sxs_lock_info);
#endif

/*
 * -----------------------------------------------------------------------
 * RECURSIVE MUTEX (allows lockers to lock again)
 */
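
/* Usage sketch (illustrative): the owning thread may call lock repeatedly
 * and must call unlock the same number of times; the underlying mutex is
 * released only when the count returns to zero.
 *
 *	xt_recursivemutex_lock(self, rm);	// acquires rm_mutex, count = 1
 *	xt_recursivemutex_lock(self, rm);	// same owner, count = 2
 *	xt_recursivemutex_unlock(self, rm);	// count = 1, still held
 *	xt_recursivemutex_unlock(self, rm);	// count = 0, rm_mutex released
 */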
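
/* When XT_THREAD_LOCK_INFO is defined, the init functions take an extra
 * name argument, passed through for lock-info tracking.
 */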
#ifdef XT_THREAD_LOCK_INFO
void xt_recursivemutex_init(XTThreadPtr self, XTRecursiveMutexPtr rm, const char *name)
{
	rm->rm_locker = NULL;
	rm->rm_lock_count = 0;
	xt_init_mutex(self, &rm->rm_mutex, name);
}
#else
xtPublic void xt_recursivemutex_init(XTThreadPtr self, XTRecursiveMutexPtr rm)
{
	rm->rm_locker = NULL;
	rm->rm_lock_count = 0;
	xt_init_mutex(self, &rm->rm_mutex);
}
#endif
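
/* Free the mutex underlying a recursive mutex (and its lock-info record
 * when XT_THREAD_LOCK_INFO is enabled).
 */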
xtPublic void xt_recursivemutex_free(XTRecursiveMutexPtr rm)
{
	xt_free_mutex(&rm->rm_mutex);
#ifdef XT_THREAD_LOCK_INFO
	xt_thread_lock_info_free(&rm->rm_lock_info);
#endif
}
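
/* Lock recursively: take the mutex only on the first acquisition by this
 * thread; further calls by the owner just increment the lock count.
 */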
xtPublic void xt_recursivemutex_lock(XTThreadPtr self, XTRecursiveMutexPtr rm)
{
	if (self != rm->rm_locker) {
		xt_lock_mutex(self, &rm->rm_mutex);
		rm->rm_locker = self;
	}
	rm->rm_lock_count++;
}
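
/* Undo one level of locking; the mutex is released only when the count
 * drops back to zero.
 */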
xtPublic void xt_recursivemutex_unlock(XTThreadPtr self, XTRecursiveMutexPtr rm)
{
	ASSERT(self == rm->rm_locker);
	ASSERT(rm->rm_lock_count > 0);
	rm->rm_lock_count--;
	if (!rm->rm_lock_count) {
		rm->rm_locker = NULL;
		xt_unlock_mutex(self, &rm->rm_mutex);
	}
}

/*
 * -----------------------------------------------------------------------
 * RECURSIVE R/W LOCK (allows X lockers to lock again)
 */
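
/* Only the exclusive (X) lock is recursive here; the shared (S) lock calls
 * below are plain pass-throughs to the underlying read/write lock.
 */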
#ifdef XT_THREAD_LOCK_INFO
void xt_recurrwlock_init(struct XTThread *self, XTRecurRWLockPtr rrw, const char *name)
{
	rrw->rrw_locker = NULL;
	rrw->rrw_lock_count = 0;
	xt_init_rwlock(self, &rrw->rrw_lock, name);
}
#else
void xt_recurrwlock_init(struct XTThread *self, XTRecurRWLockPtr rrw)
{
	rrw->rrw_locker = NULL;
	rrw->rrw_lock_count = 0;
	xt_init_rwlock(self, &rrw->rrw_lock);
}
#endif
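
/* Free the read/write lock underlying a recursive R/W lock (and its
 * lock-info record when XT_THREAD_LOCK_INFO is enabled).
 */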
void xt_recurrwlock_free(XTRecurRWLockPtr rrw)
{
	xt_free_rwlock(&rrw->rrw_lock);
#ifdef XT_THREAD_LOCK_INFO
	xt_thread_lock_info_free(&rrw->rrw_lock_info);
#endif
}
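
/* Take the exclusive lock recursively: only the first acquisition by this
 * thread locks the underlying rwlock; the owner then just bumps the count.
 */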
void xt_recurrwlock_xlock(struct XTThread *self, XTRecurRWLockPtr rrw)
{
	if (self != rrw->rrw_locker) {
		xt_xlock_rwlock(self, &rrw->rrw_lock);
		rrw->rrw_locker = self;
	}
	rrw->rrw_lock_count++;
}
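
/* Shared locks are not recursive: this is a plain pass-through to the
 * underlying rwlock.
 */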
void xt_recurrwlock_slock(struct XTThread *self, XTRecurRWLockPtr rrw)
{
	xt_slock_rwlock(self, &rrw->rrw_lock);
}
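
/* Shared lock variant that does not take a thread pointer. */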
void xt_recurrwlock_slock_ns(XTRecurRWLockPtr rrw)
{
	xt_slock_rwlock_ns(&rrw->rrw_lock);
}
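
/* Release one level of the exclusive lock; the underlying rwlock is only
 * unlocked when the count drops back to zero.
 */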
void xt_recurrwlock_unxlock(struct XTThread *self, XTRecurRWLockPtr rrw)
{
	ASSERT(self == rrw->rrw_locker);
	ASSERT(rrw->rrw_lock_count > 0);
	rrw->rrw_lock_count--;
	if (!rrw->rrw_lock_count) {
		rrw->rrw_locker = NULL;
		xt_unlock_rwlock(self, &rrw->rrw_lock);
	}
}
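
/* Release a shared lock; the recursion count is not touched (the _ns
 * variant below takes no thread pointer).
 */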
void xt_recurrwlock_unslock(struct XTThread *self, XTRecurRWLockPtr rrw)
{
	xt_unlock_rwlock(self, &rrw->rrw_lock);
}

void xt_recurrwlock_unslock_ns(XTRecurRWLockPtr rrw)
{
	xt_unlock_rwlock_ns(&rrw->rrw_lock);
}

/*
 * -----------------------------------------------------------------------