~drizzle-trunk/drizzle/development

« back to all changes in this revision

Viewing changes to plugin/pbxt/src/lock_xt.cc

  • Committer: Patrick Crews
  • Date: 2010-09-14 20:21:03 UTC
  • mto: (1771.1.1 pcrews)
  • mto: This revision was merged to the branch mainline in revision 1772.
  • Revision ID: gleebix@gmail.com-20100914202103-1db2n0bshzafep19
Moved transaction_log tests into updated non-publisher-based tree

Show diffs side-by-side

added

removed

Lines of Context:
1
 
/* Copyright (C) 2005 PrimeBase Technologies GmbH
 
1
/* Copyright (c) 2005 PrimeBase Technologies GmbH
2
2
 *
3
3
 * PrimeBase XT
4
4
 *
14
14
 *
15
15
 * You should have received a copy of the GNU General Public License
16
16
 * along with this program; if not, write to the Free Software
17
 
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 
17
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18
18
 *
19
19
 * 2008-01-24   Paul McCullagh
20
20
 *
1141
1141
        sxs->sxs_locker = thd_id;
1142
1142
#endif
1143
1143
 
1144
 
        /* Wait for all the readers to wait! */
1145
 
        while (sxs->sxs_wait_count < sxs->sxs_rlock_count) {
1146
 
                sxs->sxs_xwaiter = 1;
1147
 
                xt_yield(); //*
1148
 
                /* This should not be required, because there is only one thread
1149
 
                 * accessing this value. However, the lock fails if this
1150
 
                 * is not done with an atomic op.
1151
 
                 *
1152
 
                 * This is because threads on other processors have the
1153
 
                 * value in processor cache. So they do not
1154
 
                 * notice that the value has been set to zero.
1155
 
                 * They think it is still 1 and march through
1156
 
                 * the barrier (sxs->sxs_xwaiter < sxs->sxs_xlocked) below.
1157
 
                 *
1158
 
                 * In the meantime, this X locker has gone on thinking
1159
 
                 * all is OK.
1160
 
                 */
1161
 
                xt_atomic_tas2(&sxs->sxs_xwaiter, 0);
1162
 
        }
 
1144
        /* Wait for all the reader to wait! */
 
1145
        while (sxs->sxs_wait_count < sxs->sxs_rlock_count)
 
1146
                xt_yield();
1163
1147
 
1164
1148
#ifdef XT_THREAD_LOCK_INFO
1165
1149
        xt_thread_lock_info_add_owner(&sxs->sxs_lock_info);
1171
1155
{
1172
1156
        xt_atomic_inc2(&sxs->sxs_rlock_count);
1173
1157
 
1174
 
        /* Wait as long as the locker is not waiting: */
1175
 
        while (sxs->sxs_xwaiter < sxs->sxs_xlocked) {
 
1158
        /* Check if there could be an X locker: */
 
1159
        if (sxs->sxs_xlocked) {
 
1160
                /* I am waiting... */
1176
1161
                xt_atomic_inc2(&sxs->sxs_wait_count);
1177
 
                while (sxs->sxs_xwaiter < sxs->sxs_xlocked) {
 
1162
                while (sxs->sxs_xlocked)
1178
1163
                        xt_yield();
1179
 
                }
1180
1164
                xt_atomic_dec2(&sxs->sxs_wait_count);
1181
1165
        }
1182
1166
 
1377
1361
 
1378
1362
/*
1379
1363
 * -----------------------------------------------------------------------
1380
 
 * RECURSIVE R/W LOCK (allows X lockers to lock again)
1381
 
 */
1382
 
 
1383
 
#ifdef XT_THREAD_LOCK_INFO
1384
 
void xt_recursivemutex_init(XTThreadPtr self, XTRecursiveMutexPtr rm, const char *name)
1385
 
{
1386
 
        rm->rm_locker = NULL;
1387
 
        rm->rm_lock_count = 0;
1388
 
        xt_init_mutex(self, &rm->rm_mutex, name);
1389
 
}
1390
 
#else
1391
 
xtPublic void xt_recursivemutex_init(XTThreadPtr self, XTRecursiveMutexPtr rm)
1392
 
{
1393
 
        rm->rm_locker = NULL;
1394
 
        rm->rm_lock_count = 0;
1395
 
        xt_init_mutex(self, &rm->rm_mutex);
1396
 
}
1397
 
#endif
1398
 
 
1399
 
xtPublic void xt_recursivemutex_free(XTRecursiveMutexPtr rm)
1400
 
{
1401
 
        xt_free_mutex(&rm->rm_mutex);
1402
 
#ifdef XT_THREAD_LOCK_INFO
1403
 
        xt_thread_lock_info_free(&rm->rm_lock_info);
1404
 
#endif
1405
 
}
1406
 
 
1407
 
xtPublic void xt_recursivemutex_lock(XTThreadPtr self, XTRecursiveMutexPtr rm)
1408
 
{
1409
 
        if (self != rm->rm_locker) {
1410
 
                xt_lock_mutex(self, &rm->rm_mutex);
1411
 
                rm->rm_locker = self;
1412
 
        }
1413
 
        rm->rm_lock_count++;
1414
 
}
1415
 
 
1416
 
xtPublic void xt_recursivemutex_unlock(XTThreadPtr self, XTRecursiveMutexPtr rm)
1417
 
{
1418
 
        ASSERT(self == rm->rm_locker);
1419
 
        ASSERT(rm->rm_lock_count > 0);
1420
 
        rm->rm_lock_count--;
1421
 
        if (!rm->rm_lock_count) {
1422
 
                rm->rm_locker = NULL;
1423
 
                xt_unlock_mutex(self, &rm->rm_mutex);
1424
 
        }
1425
 
}
1426
 
 
1427
 
/*
1428
 
 * -----------------------------------------------------------------------
1429
 
 * RECURSIVE MUTEX (allows lockers to lock again)
1430
 
 */
1431
 
 
1432
 
#ifdef XT_THREAD_LOCK_INFO
1433
 
void xt_recurrwlock_init(struct XTThread *self, XTRecurRWLockPtr rrw, const char *name)
1434
 
{
1435
 
        rrw->rrw_locker = NULL;
1436
 
        rrw->rrw_lock_count = 0;
1437
 
        xt_init_rwlock(self, &rrw->rrw_lock, name);
1438
 
}
1439
 
#else
1440
 
void xt_recurrwlock_init(struct XTThread *self, XTRecurRWLockPtr rrw)
1441
 
{
1442
 
        rrw->rrw_locker = NULL;
1443
 
        rrw->rrw_lock_count = 0;
1444
 
        xt_init_rwlock(self, &rrw->rrw_lock);
1445
 
}
1446
 
#endif
1447
 
 
1448
 
void xt_recurrwlock_free(XTRecurRWLockPtr rrw)
1449
 
{
1450
 
        xt_free_rwlock(&rrw->rrw_lock);
1451
 
#ifdef XT_THREAD_LOCK_INFO
1452
 
        xt_thread_lock_info_free(&rrw->rrw_lock_info);
1453
 
#endif
1454
 
}
1455
 
 
1456
 
void xt_recurrwlock_xlock(struct XTThread *self, XTRecurRWLockPtr rrw)
1457
 
{
1458
 
        if (self != rrw->rrw_locker) {
1459
 
                xt_xlock_rwlock(self, &rrw->rrw_lock);
1460
 
                rrw->rrw_locker = self;
1461
 
        }
1462
 
        rrw->rrw_lock_count++;
1463
 
}
1464
 
 
1465
 
void xt_recurrwlock_slock(struct XTThread *self, XTRecurRWLockPtr rrw)
1466
 
{
1467
 
        xt_slock_rwlock(self, &rrw->rrw_lock);
1468
 
}
1469
 
 
1470
 
void xt_recurrwlock_slock_ns(XTRecurRWLockPtr rrw)
1471
 
{
1472
 
        xt_slock_rwlock_ns(&rrw->rrw_lock);
1473
 
}
1474
 
 
1475
 
void xt_recurrwlock_unxlock(struct XTThread *self, XTRecurRWLockPtr rrw)
1476
 
{
1477
 
        ASSERT(self == rrw->rrw_locker);
1478
 
        ASSERT(rrw->rrw_lock_count > 0);
1479
 
        rrw->rrw_lock_count--;
1480
 
        if (!rrw->rrw_lock_count) {
1481
 
                rrw->rrw_locker = NULL;
1482
 
                xt_unlock_rwlock(self, &rrw->rrw_lock);
1483
 
        }
1484
 
}
1485
 
 
1486
 
void xt_recurrwlock_unslock(struct XTThread *self, XTRecurRWLockPtr rrw)
1487
 
{
1488
 
        xt_unlock_rwlock(self, &rrw->rrw_lock);
1489
 
}
1490
 
 
1491
 
void xt_recurrwlock_unslock_ns(XTRecurRWLockPtr rrw)
1492
 
{
1493
 
        xt_unlock_rwlock_ns(&rrw->rrw_lock);
1494
 
}
1495
 
 
1496
 
/*
1497
 
 * -----------------------------------------------------------------------
1498
1364
 * UNIT TESTS
1499
1365
 */
1500
1366