/* Copyright (C) 2005 PrimeBase Technologies GmbH
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * 2008-01-24 Paul McCullagh
#include "xt_config.h"
#include "thread_xt.h"
#include "xaction_xt.h"
#include "database_xt.h"
//#define XT_TRACE_LOCKS
//#define CHECK_ROWLOCK_GROUP_CONSISTENCY
* This function should never be called. It indicates a link error.
xtPublic void xt_log_atomic_error_and_abort(c_char *func, c_char *file, u_int line)
xt_logf(NULL, func, file, line, XT_LOG_ERROR, "%s", "Atomic operations not supported\n");
* -----------------------------------------------------------------------
* ROW LOCKS, LIST BASED
#ifdef XT_USE_LIST_BASED_ROW_LOCKS
#ifdef CHECK_ROWLOCK_GROUP_CONSISTENCY
* Requires a spin-lock on group->lg_lock!
static void check_rowlock_group(XTLockGroupPtr group)
XTThreadPtr self = xt_get_self();
if (group->lg_lock.spl_locker != self)
if (group->lg_list_in_use > group->lg_list_size)
XTLockItemPtr item = group->lg_list;
for (int i = 0; i < group->lg_list_in_use; i++, item++) {
if (!item->li_thread_id)
THR_ARRAY_READ_LOCK(&xt_thr_array_resize_lock, self->t_id);
if (!xt_thr_array[item->li_thread_id]->st_xact_data)
THR_ARRAY_UNLOCK(&xt_thr_array_resize_lock, self->t_id);
if (item->li_count > XT_TEMP_LOCK_BYTES)
// rows per thread must obey the row_id > prev_row_id + prev_count*group_size rule
if (prev_row >= item->li_row_id)
// calculate the new prev. row
if (item->li_count < XT_TEMP_LOCK_BYTES)
prev_row = item->li_row_id + (item->li_count - 1) * XT_ROW_LOCK_GROUP_COUNT;
prev_row = item->li_row_id;
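/* Worked illustration of the packing rule checked above, assuming
 * XT_ROW_LOCK_GROUP_COUNT is 8 (the real value is defined in the lock header
 * and may differ). Rows hash to a group by row_id % XT_ROW_LOCK_GROUP_COUNT,
 * so consecutive rows of one group differ by the group count. An item with
 * li_row_id == 100 and li_count == 3 then stands for rows 100, 108 and 116,
 * and the next item in the sorted list must have li_row_id > 100 + 2*8 == 116
 * for the list to be consistent.
 */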
static int xlock_cmp_row_ids(XTThreadPtr XT_UNUSED(self), register const void *XT_UNUSED(thunk), register const void *a, register const void *b)
xtRowID row_id = *((xtRowID *) a);
XTLockItemPtr item = (XTLockItemPtr) b;
if (row_id < item->li_row_id)
if (row_id > item->li_row_id)
void XTRowLockList::xt_remove_all_locks(struct XTDatabase *db, XTThreadPtr thread)
#ifdef XT_TRACE_LOCKS
xt_ttracef(xt_get_self(), "remove all locks\n");
XTPermRowLockPtr plock;
#ifndef XT_USE_TABLE_REF
XTOpenTablePtr pot = NULL;
thd_id = thread->t_id;
plock = (XTPermRowLockPtr) bl_data;
for (u_int i=0; i<bl_count; i++) {
#ifdef XT_USE_TABLE_REF
XTTableHPtr tab = plock->pr_table;
if (!xt_db_open_pool_table_ns(&pot, db, plock->pr_tab_id)) {
/* Should not happen, but just in case, we just don't
* remove the lock. We will probably end up with a deadlock
xt_log_and_clear_exception_ns();
for (int j=0; j<XT_ROW_LOCK_GROUP_COUNT; j++) {
if (plock->pr_group[j]) {
/* Go through group j and compact. */
#ifndef XT_USE_TABLE_REF
XTTableHPtr tab = pot->ot_table;
XTLockGroupPtr group;
group = &tab->tab_locks.rl_groups[j];
xt_spinlock_lock(&group->lg_lock);
copy = group->lg_list;
item = group->lg_list;
for (size_t k=0; k<group->lg_list_in_use; k++) {
if (item->li_thread_id != thd_id) {
copy->li_row_id = item->li_row_id;
copy->li_count = item->li_count;
copy->li_thread_id = item->li_thread_id;
#ifdef XT_TRACE_LOCKS
if (item->li_count == XT_TEMP_LOCK_BYTES)
xt_ttracef(xt_get_self(), "remove group %d lock row_id=%d TEMP\n", j, (int) item->li_row_id);
xt_ttracef(xt_get_self(), "remove group %d locks row_id=%d (%d)\n", j, (int) item->li_row_id, (int) item->li_count);
group->lg_list_in_use = new_count;
#ifdef CHECK_ROWLOCK_GROUP_CONSISTENCY
check_rowlock_group(group);
if (group->lg_wait_queue)
tab->tab_locks.rl_grant_locks(group, thread);
xt_spinlock_unlock(&group->lg_lock);
xt_wakeup_thread_list(thread);
#ifdef XT_USE_TABLE_REF
xt_heap_release_ns(plock->pr_table);
xt_db_return_table_to_pool_ns(pot);
#ifdef DEBUG_LOCK_QUEUE
void XTRowLocks::rl_check(XTLockWaitPtr no_lw)
XTLockGroupPtr group;
XTLockWaitPtr lw, lw_prev;
for (int i=0; i<XT_ROW_LOCK_GROUP_COUNT; i++) {
group = &rl_groups[i];
xt_spinlock_lock(&group->lg_lock);
lw = group->lg_wait_queue;
if (lw->lw_prev != lw_prev)
xt_spinlock_unlock(&group->lg_lock);
// Depending on platform 'thread->t_id' may not be used by THR_ARRAY_READ_LOCK().
xtBool XTRowLocks::rl_lock_row(XTLockGroupPtr group, XTLockWaitPtr lw, XTRowLockListPtr, int *result, XTThreadPtr thread __attribute__((unused)))
xtRowID row_id = lw->lw_row_id;
#ifdef CHECK_ROWLOCK_GROUP_CONSISTENCY
check_rowlock_group(group);
if (group->lg_list_size == group->lg_list_in_use) {
if (!xt_realloc_ns((void **) &group->lg_list, (group->lg_list_size + 2) * sizeof(XTLockItemRec)))
group->lg_list_size += 2;
item = (XTLockItemPtr) xt_bsearch(NULL, &row_id, group->lg_list, group->lg_list_in_use, sizeof(XTLockItemRec), &index, NULL, xlock_cmp_row_ids);
/* There's no item with this ID, but there could be an item with a range that covers this row */
if (!item && group->lg_list_in_use) {
item = group->lg_list + index - 1;
count = item->li_count;
if (item->li_count == XT_TEMP_LOCK_BYTES)
if (row_id >= item->li_row_id + count * XT_ROW_LOCK_GROUP_COUNT)
/* Item already exists. */
if (item->li_thread_id == lw->lw_thread->t_id) {
/* Already have a permanent lock: */
*result = XT_NO_LOCK;
lw->lw_curr_lock = XT_NO_LOCK;
* This must be valid because a thread must remove
* the locks before it frees its st_xact_data structure;
* the xt_thr_array entry must also be valid, because the
* transaction must be ended before the thread is freed.
*result = item->li_count == XT_TEMP_LOCK_BYTES ? XT_TEMP_LOCK : XT_PERM_LOCK;
THR_ARRAY_READ_LOCK(&xt_thr_array_resize_lock, thread->t_id);
tmp_thr = xt_thr_array[item->li_thread_id].td_thread;
THR_ARRAY_UNLOCK(&xt_thr_array_resize_lock, thread->t_id);
lw->lw_xn_id = tmp_thr->st_xact_data->xd_start_xn_id;
lw->lw_curr_lock = *result;
XT_MEMMOVE(group->lg_list, &group->lg_list[index+1],
&group->lg_list[index], (group->lg_list_in_use - index) * sizeof(XTLockItemRec));
group->lg_list[index].li_row_id = row_id;
group->lg_list[index].li_count = XT_TEMP_LOCK_BYTES;
group->lg_list[index].li_thread_id = lw->lw_thread->t_id;
group->lg_list_in_use++;
#ifdef XT_TRACE_LOCKS
xt_ttracef(lw->lw_ot->ot_thread, "set temp lock row=%d setby=%s\n", (int) row_id, xt_get_self()->t_name);
#ifdef CHECK_ROWLOCK_GROUP_CONSISTENCY
check_rowlock_group(group);
*result = XT_NO_LOCK;
lw->lw_ot->ot_temp_row_lock = row_id;
lw->lw_curr_lock = XT_NO_LOCK;
void XTRowLocks::rl_grant_locks(XTLockGroupPtr group, XTThreadPtr thread)
XTLockWaitPtr lw, lw_next, lw_prev;
xtThreadID lw_thd_id;
thread->st_thread_list_count = 0;
lw = group->lg_wait_queue;
lw_next = lw->lw_next;
lw_prev = lw->lw_prev;
lw_thd_id = lw->lw_thread->t_id;
/* NOTE: after lw_curr_lock is changed, lw may no longer be referenced
* by this function!!!
if (!rl_lock_row(group, lw, &lw->lw_thread->st_lock_list, &result, thread)) {
/* We transfer the error to the other thread! */
XTThreadPtr self = xt_get_self();
result = XT_LOCK_ERR;
memcpy(&lw->lw_thread->t_exception, &self->t_exception, sizeof(XTExceptionRec));
lw->lw_curr_lock = XT_LOCK_ERR;
if (result == XT_NO_LOCK || result == XT_LOCK_ERR) {
/* Remove from the wait queue: */
lw_next->lw_prev = lw_prev;
lw_prev->lw_next = lw_next;
if (group->lg_wait_queue == lw)
group->lg_wait_queue = lw_next;
if (group->lg_wait_queue_end == lw)
group->lg_wait_queue_end = lw_prev;
if (result == XT_NO_LOCK) {
/* Add to the thread list: */
if (thread->st_thread_list_count == thread->st_thread_list_size) {
if (!xt_realloc_ns((void **) &thread->st_thread_list, (thread->st_thread_list_size+1) * sizeof(xtThreadID))) {
xt_wakeup_thread(lw_thd_id, thread);
thread->st_thread_list_size++;
thread->st_thread_list[thread->st_thread_list_count] = lw_thd_id;
thread->st_thread_list_count++;
void XTRowLocks::xt_cancel_temp_lock(XTLockWaitPtr lw)
XTLockGroupPtr group;
group = &rl_groups[lw->lw_row_id % XT_ROW_LOCK_GROUP_COUNT];
xt_spinlock_lock(&group->lg_lock);
if (lw->lw_curr_lock == XT_TEMP_LOCK || lw->lw_curr_lock == XT_PERM_LOCK) {
/* In case of XT_LOCK_ERR or XT_NO_LOCK, the lw structure will
* no longer be on the wait queue.
XTLockWaitPtr lw_next, lw_prev;
lw_next = lw->lw_next;
lw_prev = lw->lw_prev;
/* Remove from the wait queue: */
lw_next->lw_prev = lw_prev;
lw_prev->lw_next = lw_next;
if (group->lg_wait_queue == lw)
group->lg_wait_queue = lw_next;
if (group->lg_wait_queue_end == lw)
group->lg_wait_queue_end = lw_prev;
xt_spinlock_unlock(&group->lg_lock);
//#define QUEUE_ORDER_FIFO
/* Try to lock a row.
* This function returns:
* XT_NO_LOCK on success.
* XT_TEMP_LOCK if there is a temporary lock on the row.
* XT_PERM_LOCK if there is a permanent lock on the row.
* XT_FAILED if an error occurred.
* If there is a lock on this row, the transaction ID of the
* locker is also returned.
* The caller must wait if the row is locked. If the lock is
* permanent, then the caller must wait for the transaction to
* terminate. If the lock is temporary, then the caller must
* wait for the transaction to signal that the lock has been released.
xtBool XTRowLocks::xt_set_temp_lock(XTOpenTablePtr ot, XTLockWaitPtr lw, XTRowLockListPtr lock_list)
XTLockGroupPtr group;
if (ot->ot_temp_row_lock) {
/* Check whether we already hold this temp lock: */
if (ot->ot_temp_row_lock == lw->lw_row_id) {
lw->lw_curr_lock = XT_NO_LOCK;
xt_make_lock_permanent(ot, lock_list);
/* Add a temporary lock. */
group = &rl_groups[lw->lw_row_id % XT_ROW_LOCK_GROUP_COUNT];
xt_spinlock_lock(&group->lg_lock);
if (!rl_lock_row(group, lw, lock_list, &result, ot->ot_thread)) {
xt_spinlock_unlock(&group->lg_lock);
if (result != XT_NO_LOCK) {
/* Add the thread to the end of the thread queue: */
#ifdef QUEUE_ORDER_FIFO
if (group->lg_wait_queue_end) {
group->lg_wait_queue_end->lw_next = lw;
lw->lw_prev = group->lg_wait_queue_end;
group->lg_wait_queue = lw;
group->lg_wait_queue_end = lw;
XTLockWaitPtr pos = group->lg_wait_queue_end;
xtXactID xn_id = ot->ot_thread->st_xact_data->xd_start_xn_id;
if (pos->lw_thread->st_xact_data->xd_start_xn_id < xn_id)
lw->lw_next = pos->lw_next;
pos->lw_next->lw_prev = lw;
group->lg_wait_queue_end = lw;
/* Front of the queue: */
lw->lw_next = group->lg_wait_queue;
if (group->lg_wait_queue)
group->lg_wait_queue->lw_prev = lw;
group->lg_wait_queue_end = lw;
group->lg_wait_queue = lw;
xt_spinlock_unlock(&group->lg_lock);
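/* A minimal sketch of the caller-side protocol described above. This is an
 * illustration only (guarded out): the real callers live in the row handling
 * code, and the wait step is only indicated. The type name XTLockWaitRec is
 * assumed from the usual Rec/Ptr naming; the fields lw_thread, lw_ot,
 * lw_row_id, lw_xn_id and lw_curr_lock are taken from this file.
 */
#if 0
static xtBool example_wait_for_row_lock(XTOpenTablePtr ot, xtRowID row_id, XTRowLockListPtr lock_list)
{
	XTLockWaitRec lw;

	lw.lw_thread = ot->ot_thread;
	lw.lw_ot = ot;
	lw.lw_row_id = row_id;
	if (!ot->ot_table->tab_locks.xt_set_temp_lock(ot, &lw, lock_list))
		return FALSE;
	while (lw.lw_curr_lock == XT_TEMP_LOCK || lw.lw_curr_lock == XT_PERM_LOCK) {
		/* The current holder's transaction is identified by lw.lw_xn_id;
		 * wait for it to end or to signal the release, then re-check.
		 * An error can also be transferred (lw.lw_curr_lock == XT_LOCK_ERR). */
	}
	return lw.lw_curr_lock == XT_NO_LOCK;
}
#endif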
* Remove a temporary lock.
* If updated is set to TRUE this means that the row was updated.
* This means that any thread waiting on the temporary lock will
* also have to wait for the transaction to quit before continuing.
* If the thread were to continue it would just hang again because
* it will discover that the transaction has updated the row.
* So the 'updated' flag is an optimisation which prevents the
* thread from making an unnecessary retry.
void XTRowLocks::xt_remove_temp_lock(XTOpenTablePtr ot, xtBool updated)
XTLockGroupPtr group;
xtBool lock_granted = FALSE;
xtThreadID locking_thread_id = 0;
if (!(row_id = ot->ot_temp_row_lock))
group = &rl_groups[row_id % XT_ROW_LOCK_GROUP_COUNT];
xt_spinlock_lock(&group->lg_lock);
#ifdef CHECK_ROWLOCK_GROUP_CONSISTENCY
check_rowlock_group(group);
#ifdef XT_TRACE_LOCKS
xt_ttracef(xt_get_self(), "remove temp lock %d\n", (int) row_id);
item = (XTLockItemPtr) xt_bsearch(NULL, &row_id, group->lg_list, group->lg_list_in_use, sizeof(XTLockItemRec), &index, NULL, xlock_cmp_row_ids);
if (item->li_thread_id == ot->ot_thread->t_id &&
item->li_count == XT_TEMP_LOCK_BYTES) {
/* First check if there is some thread waiting to take over this lock: */
lw = group->lg_wait_queue;
if (lw->lw_row_id == row_id) {
/* Grant the lock just released... */
XTLockWaitPtr lw_next, lw_prev;
xtXactID locking_xact_id;
/* Store this info, lw will soon be untouchable! */
lw_next = lw->lw_next;
lw_prev = lw->lw_prev;
locking_xact_id = lw->lw_thread->st_xact_data->xd_start_xn_id;
locking_thread_id = lw->lw_thread->t_id;
/* The lock has moved from one thread to the next;
* change the thread holding this lock:
item->li_thread_id = locking_thread_id;
/* Remove from the wait queue: */
lw_next->lw_prev = lw_prev;
lw_prev->lw_next = lw_next;
if (group->lg_wait_queue == lw)
group->lg_wait_queue = lw_next;
if (group->lg_wait_queue_end == lw)
group->lg_wait_queue_end = lw_prev;
/* If the thread that released the lock updated the
* row, then we will also have to wait for that transaction:
lw->lw_row_updated = TRUE;
lw->lw_updating_xn_id = ot->ot_thread->st_xact_data->xd_start_xn_id;
/* The thread has the lock now: */
lw->lw_ot->ot_temp_row_lock = row_id;
lw->lw_curr_lock = XT_NO_LOCK;
/* Everyone after this that is waiting for the same lock is
* now waiting for a different transaction:
if (lw->lw_row_id == row_id) {
lw->lw_xn_id = locking_xact_id;
lw->lw_curr_lock = XT_TEMP_LOCK;
/* Remove the lock: */
XT_MEMMOVE(group->lg_list, &group->lg_list[index],
&group->lg_list[index+1], (group->lg_list_in_use - index - 1) * sizeof(XTLockItemRec));
group->lg_list_in_use--;
#ifdef CHECK_ROWLOCK_GROUP_CONSISTENCY
check_rowlock_group(group);
xt_spinlock_unlock(&group->lg_lock);
ot->ot_temp_row_lock = 0;
xt_wakeup_thread(locking_thread_id, ot->ot_thread);
xtBool XTRowLocks::xt_make_lock_permanent(XTOpenTablePtr ot, XTRowLockListPtr lock_list)
XTLockGroupPtr group;
if (!(row_id = ot->ot_temp_row_lock))
#ifdef XT_TRACE_LOCKS
xt_ttracef(xt_get_self(), "make lock perm %d\n", (int) ot->ot_temp_row_lock);
/* Add to the lock list: */
XTPermRowLockPtr locks = (XTPermRowLockPtr) lock_list->bl_data;
for (unsigned i=0; i<lock_list->bl_count; i++) {
#ifdef XT_USE_TABLE_REF
if (locks->pr_table == ot->ot_table) {
if (locks->pr_tab_id == ot->ot_table->tab_id) {
locks->pr_group[row_id % XT_ROW_LOCK_GROUP_COUNT] = 1;
/* Add a new entry to the lock list: */
XTPermRowLockRec perm_lock;
#ifdef XT_USE_TABLE_REF
perm_lock.pr_table = ot->ot_table;
xt_heap_reference_ns(perm_lock.pr_table);
perm_lock.pr_tab_id = ot->ot_table->tab_id;
memset(perm_lock.pr_group, 0, XT_ROW_LOCK_GROUP_COUNT);
perm_lock.pr_group[row_id % XT_ROW_LOCK_GROUP_COUNT] = 1;
if (!xt_bl_append(NULL, lock_list, &perm_lock)) {
xt_remove_temp_lock(ot, FALSE);
group = &rl_groups[row_id % XT_ROW_LOCK_GROUP_COUNT];
xt_spinlock_lock(&group->lg_lock);
item = (XTLockItemPtr) xt_bsearch(NULL, &row_id, group->lg_list, group->lg_list_in_use, sizeof(XTLockItemRec), &index, NULL, xlock_cmp_row_ids);
#ifdef CHECK_ROWLOCK_GROUP_CONSISTENCY
check_rowlock_group(group);
/* Lock exists (it should!). */
if (item->li_thread_id == ot->ot_thread->t_id &&
item->li_count == XT_TEMP_LOCK_BYTES) {
group->lg_list[index-1].li_thread_id == ot->ot_thread->t_id &&
group->lg_list[index-1].li_count < XT_TEMP_LOCK_BYTES-2 &&
group->lg_list[index-1].li_row_id == row_id - (XT_ROW_LOCK_GROUP_COUNT * group->lg_list[index-1].li_count)) {
group->lg_list[index-1].li_count++;
/* Combine with the left: */
if (index + 1 < group->lg_list_in_use &&
group->lg_list[index+1].li_thread_id == ot->ot_thread->t_id &&
group->lg_list[index+1].li_count != XT_TEMP_LOCK_BYTES &&
group->lg_list[index+1].li_row_id == row_id + XT_ROW_LOCK_GROUP_COUNT) {
/* And combine with the right */
u_int left = group->lg_list[index-1].li_count + group->lg_list[index+1].li_count;
if (left > XT_TEMP_LOCK_BYTES-1) {
right = left - (XT_TEMP_LOCK_BYTES-1);
left = XT_TEMP_LOCK_BYTES-1;
group->lg_list[index-1].li_count = left;
/* There is something left over on the right: */
group->lg_list[index+1].li_count = right;
group->lg_list[index+1].li_row_id = group->lg_list[index-1].li_row_id + left * XT_ROW_LOCK_GROUP_COUNT;
XT_MEMMOVE(group->lg_list, &group->lg_list[index],
&group->lg_list[index+1], (group->lg_list_in_use - index - 1) * sizeof(XTLockItemRec));
group->lg_list_in_use--;
XT_MEMMOVE(group->lg_list, &group->lg_list[index],
&group->lg_list[index+2], (group->lg_list_in_use - index - 2) * sizeof(XTLockItemRec));
group->lg_list_in_use -= 2;
XT_MEMMOVE(group->lg_list, &group->lg_list[index],
&group->lg_list[index+1], (group->lg_list_in_use - index - 1) * sizeof(XTLockItemRec));
group->lg_list_in_use--;
else if (index + 1 < group->lg_list_in_use &&
group->lg_list[index+1].li_thread_id == ot->ot_thread->t_id &&
group->lg_list[index+1].li_count < XT_TEMP_LOCK_BYTES-2 &&
group->lg_list[index+1].li_row_id == row_id + XT_ROW_LOCK_GROUP_COUNT) {
/* Combine with the right: */
group->lg_list[index+1].li_count++;
group->lg_list[index+1].li_row_id = row_id;
XT_MEMMOVE(group->lg_list, &group->lg_list[index],
&group->lg_list[index+1], (group->lg_list_in_use - index - 1) * sizeof(XTLockItemRec));
group->lg_list_in_use--;
group->lg_list[index].li_count = 1;
#ifdef CHECK_ROWLOCK_GROUP_CONSISTENCY
check_rowlock_group(group);
xt_spinlock_unlock(&group->lg_lock);
ot->ot_temp_row_lock = 0;
xtBool xt_init_row_locks(XTRowLocksPtr rl)
for (int i=0; i<XT_ROW_LOCK_GROUP_COUNT; i++) {
xt_spinlock_init_with_autoname(NULL, &rl->rl_groups[i].lg_lock);
rl->rl_groups[i].lg_wait_queue = NULL;
rl->rl_groups[i].lg_list_size = 0;
rl->rl_groups[i].lg_list_in_use = 0;
rl->rl_groups[i].lg_list = NULL;
void xt_exit_row_locks(XTRowLocksPtr rl)
for (int i=0; i<XT_ROW_LOCK_GROUP_COUNT; i++) {
xt_spinlock_free(NULL, &rl->rl_groups[i].lg_lock);
rl->rl_groups[i].lg_wait_queue = NULL;
rl->rl_groups[i].lg_list_size = 0;
rl->rl_groups[i].lg_list_in_use = 0;
if (rl->rl_groups[i].lg_list) {
xt_free_ns(rl->rl_groups[i].lg_list);
rl->rl_groups[i].lg_list = NULL;
* -----------------------------------------------------------------------
* ROW LOCKS, HASH BASED
#else // XT_USE_LIST_BASED_ROW_LOCKS
void XTRowLockList::old_xt_remove_all_locks(struct XTDatabase *db, xtThreadID thd_id)
#ifdef XT_TRACE_LOCKS
xt_ttracef(xt_get_self(), "remove all locks\n");
XTPermRowLockPtr plock;
XTOpenTablePtr pot = NULL;
plock = (XTPermRowLockPtr) &bl_data[bl_count * bl_item_size];
for (u_int i=0; i<bl_count; i++) {
pgroup = plock->pr_group;
ptab_id = plock->pr_tab_id;
if (pot->ot_table->tab_id == ptab_id)
xt_db_return_table_to_pool_ns(pot);
if (!xt_db_open_pool_table_ns(&pot, db, ptab_id)) {
/* Should not happen, but just in case, we just don't
* remove the lock. We will probably end up with a deadlock
xt_log_and_clear_exception_ns();
goto skip_remove_lock;
/* Can happen if the table has been dropped: */
goto skip_remove_lock;
#ifdef XT_TRACE_LOCKS
xt_ttracef(xt_get_self(), "remove lock group=%d\n", pgroup);
pot->ot_table->tab_locks.tab_row_locks[pgroup] = NULL;
pot->ot_table->tab_locks.tab_lock_perm[pgroup] = 0;
xt_db_return_table_to_pool_ns(pot);
/* Try to lock a row.
* This function returns:
* XT_NO_LOCK on success.
* XT_TEMP_LOCK if there is a temporary lock on the row.
* XT_PERM_LOCK if there is a permanent lock on the row.
* If there is a lock on this row, the transaction ID of the
* locker is also returned.
* The caller must wait if the row is locked. If the lock is
* permanent, then the caller must wait for the transaction to
* terminate. If the lock is temporary, then the caller must
* wait for the transaction to signal that the lock has been released.
int XTRowLocks::old_xt_set_temp_lock(XTOpenTablePtr ot, xtRowID row, xtXactID *xn_id, XTRowLockListPtr lock_list)
XTXactDataPtr xact, my_xact;
if (ot->ot_temp_row_lock) {
/* Check whether we already hold this temp lock: */
if (ot->ot_temp_row_lock == row) {
gl->lw_curr_lock = XT_NO_LOCK;
xt_make_lock_permanent(ot, lock_list);
my_xact = ot->ot_thread->st_xact_data;
group = row % XT_ROW_LOCK_COUNT;
if ((xact = tab_row_locks[group])) {
*xn_id = xact->xd_start_xn_id;
return tab_lock_perm[group] ? XT_PERM_LOCK : XT_TEMP_LOCK;
tab_row_locks[row % XT_ROW_LOCK_COUNT] = my_xact;
#ifdef XT_TRACE_LOCKS
xt_ttracef(xt_get_self(), "set temp lock %d group=%d for %s\n", (int) row, (int) row % XT_ROW_LOCK_COUNT, ot->ot_thread->t_name);
ot->ot_temp_row_lock = row;
/* Just check if there is a lock on the row.
* This function returns:
* XT_NO_LOCK if there is no lock.
* XT_TEMP_LOCK if there is a temporary lock on the row.
* XT_PERM_LOCK if there is a permanent lock on the row.
int XTRowLocks::old_xt_is_locked(struct XTOpenTable *ot, xtRowID row, xtXactID *xn_id)
group = row % XT_ROW_LOCK_COUNT;
if ((xact = tab_row_locks[group])) {
if (xact == ot->ot_thread->st_xact_data)
*xn_id = xact->xd_start_xn_id;
if (tab_lock_perm[group])
void XTRowLocks::old_xt_remove_temp_lock(XTOpenTablePtr ot)
XTXactDataPtr xact, my_xact;
if (!ot->ot_temp_row_lock)
my_xact = ot->ot_thread->st_xact_data;
group = ot->ot_temp_row_lock % XT_ROW_LOCK_COUNT;
#ifdef XT_TRACE_LOCKS
xt_ttracef(xt_get_self(), "remove temp lock %d group=%d\n", (int) ot->ot_temp_row_lock, (int) ot->ot_temp_row_lock % XT_ROW_LOCK_COUNT);
ot->ot_temp_row_lock = 0;
if ((xact = tab_row_locks[group])) {
tab_row_locks[group] = NULL;
if (ot->ot_table->tab_db->db_xn_wait_count)
xt_xn_wakeup_transactions(ot->ot_table->tab_db, ot->ot_thread);
xtBool XTRowLocks::old_xt_make_lock_permanent(XTOpenTablePtr ot, XTRowLockListPtr lock_list)
if (!ot->ot_temp_row_lock)
#ifdef XT_TRACE_LOCKS
xt_ttracef(xt_get_self(), "make lock perm %d group=%d\n", (int) ot->ot_temp_row_lock, (int) ot->ot_temp_row_lock % XT_ROW_LOCK_COUNT);
/* Check if the lock is already permanent: */
group = ot->ot_temp_row_lock % XT_ROW_LOCK_COUNT;
if (!tab_lock_perm[group]) {
XTPermRowLockRec plock;
plock.pr_tab_id = ot->ot_table->tab_id;
plock.pr_group = group;
if (!xt_bl_append(NULL, lock_list, &plock)) {
xt_remove_temp_lock(ot);
tab_lock_perm[group] = 1;
ot->ot_temp_row_lock = 0;
/* Release this lock, and all locks gained after this lock.
* The locks are only released temporarily. They will be regained
* below using regain locks.
* XT_NO_LOCK if no lock is released.
* XT_PERM_LOCK if a lock is released.
* Note that only permanent locks can be released in this way.
* So if the thread has a temporary lock, it will first be made permanent.
* The idea of releasing locks comes from the fact that each
* lock locks a group of records.
* So if T1 has a lock (e.g. when doing SELECT FOR UPDATE),
* and then encounters an updated record x
* from T2, and it must wait for T2, it first releases the
* lock, just in case T2 tries to gain a lock on another
* record y in the same group, which would cause it to wait for T1 (a deadlock).
* However, there are several problems with releasing locks:
* - It can cause a "live-lock", where another transaction
* keeps getting in first.
* - It may not solve the problem in all cases because
* the SELECT FOR UPDATE has locked other record groups
* before it encountered record x.
* - Further problems occur when locks are granted by callback:
* T1 waits for T2, because it has a lock on record x
* T2 releases the lock because it must wait for T3
* T1 is granted the lock (but does not know about this yet)
* T2 tries to regain the lock (after T3 quits) and
* must wait for T1 - DEADLOCK
* In general, it does not make sense to release locks
* when they can be granted again by a callback.
* TODO: 2 possible solutions:
* - Do not lock groups, lock rows.
* UPDATE INTENTION ROW LOCK
* - Use multiple lock types:
* UPDATE INTENTION LOCK (required first)
* SHARED UPDATE LOCK (used by INSERT or DELETE)
* EXCLUSIVE UPDATE LOCK (used by SELECT FOR UPDATE)
* Temporary solution. Do not release any locks.
int XTRowLocks::xt_release_locks(struct XTOpenTable *ot, xtRowID row, XTRowLockListPtr lock_list)
* Regain a lock previously held. This function regains locks
* released by xt_release_locks().
* It will return lock_type and xn_id if the row is locked, and therefore
* regain cannot continue. In this case, the caller must wait.
* It returns XT_NO_LOCK if there are no more locks to be regained.
* Locks are always regained in the order in which they were originally taken.
xtBool XTRowLocks::xt_regain_locks(struct XTOpenTable *ot, int *lock_type, xtXactID *xn_id, XTRowLockListPtr lock_list)
xtBool old_xt_init_row_locks(XTRowLocksPtr rl)
memset(rl->tab_lock_perm, 0, XT_ROW_LOCK_COUNT);
memset(rl->tab_row_locks, 0, XT_ROW_LOCK_COUNT * sizeof(XTXactDataPtr));
void old_xt_exit_row_locks(XTRowLocksPtr XT_UNUSED(rl))
#endif // XT_USE_LIST_BASED_ROW_LOCKS
xtPublic xtBool xt_init_row_lock_list(XTRowLockListPtr lock_list)
lock_list->bl_item_size = sizeof(XTPermRowLockRec);
lock_list->bl_size = 0;
lock_list->bl_count = 0;
lock_list->bl_data = NULL;
xtPublic void xt_exit_row_lock_list(XTRowLockListPtr lock_list)
xt_bl_set_size(NULL, lock_list, 0);
* -----------------------------------------------------------------------
* SPIN LOCK
#ifdef XT_THREAD_LOCK_INFO
xtPublic void xt_spinlock_init(XTThreadPtr self, XTSpinLockPtr spl, const char *n)
xtPublic void xt_spinlock_init(XTThreadPtr self, XTSpinLockPtr spl)
#ifdef XT_NO_ATOMICS
xt_init_mutex_with_autoname(self, &spl->spl_mutex);
spl->spl_locker = 0;
#ifdef XT_THREAD_LOCK_INFO
xt_thread_lock_info_init(&spl->spl_lock_info, spl);
xtPublic void xt_spinlock_free(XTThreadPtr XT_UNUSED(self), XTSpinLockPtr spl)
1055
#ifdef XT_NO_ATOMICS
1056
xt_free_mutex(&spl->spl_mutex);
1058
#ifdef XT_THREAD_LOCK_INFO
1059
xt_thread_lock_info_free(&spl->spl_lock_info);
1063
xtPublic xtBool xt_spinlock_spin(XTSpinLockPtr spl)
1065
volatile xtWord4 *lck = &spl->spl_lock;
1068
for (int i=0; i<10; i++) {
1069
/* Check the lock variable: */
1071
/* Try to get the lock: */
1072
if (!xt_spinlock_set(spl))
1086
xtPublic void xt_spinlock_set_thread(XTSpinLockPtr spl)
1088
spl->spl_locker = xt_get_self();
1093
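/* Typical usage of the spin lock, as seen in the row-lock code above: the
 * lock is held only around very short critical sections. The assumption here
 * is that xt_spinlock_lock() (defined in the lock header) falls back to the
 * xt_spinlock_spin() loop above when the lock is contended.
 */
#if 0
	xt_spinlock_lock(&group->lg_lock);
	/* ... briefly touch group->lg_list ... */
	xt_spinlock_unlock(&group->lg_lock);
#endif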
* -----------------------------------------------------------------------
1094
* READ/WRITE SPIN LOCK
1096
* An extremely genius very fast read/write lock based on atomics!
1099
#ifdef XT_THREAD_LOCK_INFO
1100
xtPublic void xt_spinxslock_init(struct XTThread *XT_UNUSED(self), XTSpinXSLockPtr sxs, const char *name)
1102
xtPublic void xt_spinxslock_init(struct XTThread *XT_UNUSED(self), XTSpinXSLockPtr sxs)
1105
sxs->sxs_xlocked = 0;
1106
sxs->sxs_rlock_count = 0;
1107
sxs->sxs_wait_count = 0;
1109
sxs->sxs_locker = 0;
1111
#ifdef XT_THREAD_LOCK_INFO
1112
sxs->sxs_name = name;
1113
xt_thread_lock_info_init(&sxs->sxs_lock_info, sxs);
1117
xtPublic void xt_spinxslock_free(struct XTThread *XT_UNUSED(self), XTSpinXSLockPtr sxs)
1119
#ifdef XT_THREAD_LOCK_INFO
1120
xt_thread_lock_info_free(&sxs->sxs_lock_info);
1126
xtPublic xtBool xt_spinxslock_xlock(XTSpinXSLockPtr sxs, xtBool try_lock, xtThreadID XT_NDEBUG_UNUSED(thd_id))
1128
register xtWord2 set;
1130
/* Wait for exclusive locker: */
1132
set = xt_atomic_tas2(&sxs->sxs_xlocked, 1);
1141
sxs->sxs_locker = thd_id;
1144
/* Wait for all the readers to wait! */
1145
while (sxs->sxs_wait_count < sxs->sxs_rlock_count) {
1146
sxs->sxs_xwaiter = 1;
1148
/* This should not be required, because there is only one thread
1149
* accessing this value. However, the lock fails if this
1150
* is not done with an atomic op.
1152
* This is because threads on other processors have the
1153
* value in processor cache. So they do not
1154
* notice that the value has been set to zero.
1155
* They think it is still 1 and march through
1156
* the barrier (sxs->sxs_xwaiter < sxs->sxs_xlocked) below.
1158
* In the meantime, this X locker has gone on thinking
1161
xt_atomic_tas2(&sxs->sxs_xwaiter, 0);
1164
#ifdef XT_THREAD_LOCK_INFO
1165
xt_thread_lock_info_add_owner(&sxs->sxs_lock_info);
1170
xtPublic xtBool xt_spinxslock_slock(XTSpinXSLockPtr sxs)
xt_atomic_inc2(&sxs->sxs_rlock_count);
/* Wait as long as the locker is not waiting: */
while (sxs->sxs_xwaiter < sxs->sxs_xlocked) {
xt_atomic_inc2(&sxs->sxs_wait_count);
while (sxs->sxs_xwaiter < sxs->sxs_xlocked) {
xt_atomic_dec2(&sxs->sxs_wait_count);
#ifdef XT_THREAD_LOCK_INFO
xt_thread_lock_info_add_owner(&sxs->sxs_lock_info);
xtPublic xtBool xt_spinxslock_unlock(XTSpinXSLockPtr sxs, xtBool xlocked)
ASSERT_NS(sxs->sxs_locker && sxs->sxs_xlocked);
sxs->sxs_locker = 0;
sxs->sxs_xlocked = 0;
ASSERT_NS(sxs->sxs_rlock_count > 0);
xt_atomic_dec2(&sxs->sxs_rlock_count);
#ifdef XT_THREAD_LOCK_INFO
xt_thread_lock_info_release_owner(&sxs->sxs_lock_info);
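/* Usage pattern for the spin X/S lock, mirroring the unit tests at the end of
 * this file (lck_run_reader/lck_run_writer): readers take a shared lock,
 * writers an exclusive one, and the unlock call is told which kind it is
 * releasing. The variable `lock` stands for any initialized XTSpinXSLockRec.
 */
#if 0
	/* Reader: */
	xt_spinxslock_slock(&lock);
	/* ... read shared data ... */
	xt_spinxslock_unlock(&lock, FALSE);

	/* Writer: */
	xt_spinxslock_xlock(&lock, FALSE, self->t_id);
	/* ... modify shared data ... */
	xt_spinxslock_unlock(&lock, TRUE);
#endif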
* -----------------------------------------------------------------------
* FAST READ/WRITE LOCK (BASED ON FAST MUTEX)
#ifdef XT_THREAD_LOCK_INFO
xtPublic void xt_xsmutex_init(struct XTThread *self, XTMutexXSLockPtr xsm, const char *name)
xtPublic void xt_xsmutex_init(struct XTThread *self, XTMutexXSLockPtr xsm)
xt_init_mutex_with_autoname(self, &xsm->xsm_lock);
xt_init_cond(self, &xsm->xsm_xcond);
xt_init_cond(self, &xsm->xsm_rcond);
xsm->xsm_xlocker = 0;
xsm->xsm_rlock_count = 0;
xsm->xsm_rwait_count = 0;
xsm->xsm_locker = 0;
#ifdef XT_THREAD_LOCK_INFO
xsm->xsm_name = name;
xt_thread_lock_info_init(&xsm->xsm_lock_info, xsm);
xtPublic void xt_xsmutex_free(struct XTThread *XT_UNUSED(self), XTMutexXSLockPtr xsm)
xt_free_mutex(&xsm->xsm_lock);
xt_free_cond(&xsm->xsm_xcond);
xt_free_cond(&xsm->xsm_rcond);
#ifdef XT_THREAD_LOCK_INFO
xt_thread_lock_info_free(&xsm->xsm_lock_info);
xtPublic xtBool xt_xsmutex_xlock(XTMutexXSLockPtr xsm, xtThreadID thd_id)
xt_lock_mutex_ns(&xsm->xsm_lock);
xt_atomic_inc2(&xsm->xsm_xwait_count);
/* Wait for exclusive locker: */
while (xsm->xsm_xlocker) {
if (!xt_timed_wait_cond_ns(&xsm->xsm_xcond, &xsm->xsm_lock, 10000)) {
xsm->xsm_xwait_count--;
xt_unlock_mutex_ns(&xsm->xsm_lock);
/* GOTCHA: You would think this is not necessary...
* But it does not always work if a plain assignment is used.
* The reason is, I guess, on MMP the assignment is not
* always immediately visible to other processors, because they
* have old versions of this variable in their cache.
* But this is required, because the locking mechanism is based on:
* Locker: sets xlocker, tests rlock_count
* Reader: incs rlock_count, tests xlocker
* The test, in both cases, may not read stale values.
* volatile does not help, because this just turns compiler
* optimisations off.
xt_atomic_set4(&xsm->xsm_xlocker, thd_id);
/* Wait for all the readers to wait! */
while (xsm->xsm_rwait_count < xsm->xsm_rlock_count) {
/* {RACE-WR_MUTEX} Here as well: */
if (!xt_timed_wait_cond_ns(&xsm->xsm_xcond, &xsm->xsm_lock, 100)) {
xsm->xsm_xwait_count--;
xsm->xsm_xlocker = 0;
xt_unlock_mutex_ns(&xsm->xsm_lock);
xsm->xsm_xwait_count--;
#ifdef XT_THREAD_LOCK_INFO
xt_thread_lock_info_add_owner(&xsm->xsm_lock_info);
xtPublic xtBool xt_xsmutex_slock(XTMutexXSLockPtr xsm, xtThreadID XT_UNUSED(thd_id))
xt_atomic_inc2(&xsm->xsm_rlock_count);
/* Check if there could be an X locker: */
if (xsm->xsm_xlocker) {
/* I am waiting... */
xt_lock_mutex_ns(&xsm->xsm_lock);
xsm->xsm_rwait_count++;
/* Wake up the xlocker: */
if (xsm->xsm_xlocker && xsm->xsm_rwait_count == xsm->xsm_rlock_count) {
if (!xt_broadcast_cond_ns(&xsm->xsm_xcond)) {
xsm->xsm_rwait_count--;
xt_unlock_mutex_ns(&xsm->xsm_lock);
while (xsm->xsm_xlocker) {
if (!xt_timed_wait_cond_ns(&xsm->xsm_rcond, &xsm->xsm_lock, 10000)) {
xsm->xsm_rwait_count--;
xt_unlock_mutex_ns(&xsm->xsm_lock);
xsm->xsm_rwait_count--;
xt_unlock_mutex_ns(&xsm->xsm_lock);
#ifdef XT_THREAD_LOCK_INFO
xt_thread_lock_info_add_owner(&xsm->xsm_lock_info);
xtPublic xtBool xt_xsmutex_unlock(XTMutexXSLockPtr xsm, xtThreadID thd_id)
if (xsm->xsm_xlocker == thd_id) {
xsm->xsm_xlocker = 0;
if (xsm->xsm_rwait_count) {
if (!xt_broadcast_cond_ns(&xsm->xsm_rcond)) {
xt_unlock_mutex_ns(&xsm->xsm_lock);
else if (xsm->xsm_xwait_count) {
/* Wake up any other X or shared lockers: */
if (!xt_broadcast_cond_ns(&xsm->xsm_xcond)) {
xt_unlock_mutex_ns(&xsm->xsm_lock);
xt_unlock_mutex_ns(&xsm->xsm_lock);
/* Taking the advice from {RACE-WR_MUTEX} I do the decrement
* after I have a lock!
if (xsm->xsm_xwait_count) {
xt_lock_mutex_ns(&xsm->xsm_lock);
xt_atomic_dec2(&xsm->xsm_rlock_count);
if (xsm->xsm_xwait_count && xsm->xsm_rwait_count == xsm->xsm_rlock_count) {
/* If the X locker is waiting for me,
* then allow it to continue.
if (!xt_broadcast_cond_ns(&xsm->xsm_xcond)) {
xt_unlock_mutex_ns(&xsm->xsm_lock);
xt_unlock_mutex_ns(&xsm->xsm_lock);
xt_atomic_dec2(&xsm->xsm_rlock_count);
#ifdef XT_THREAD_LOCK_INFO
xt_thread_lock_info_release_owner(&xsm->xsm_lock_info);
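/* Usage pattern for the mutex-based X/S lock, again taken from the unit tests
 * below: unlike the spin X/S lock, both the shared and the exclusive calls,
 * and the unlock, receive the caller's thread ID so the unlock can tell an X
 * owner from a reader. The variable `lock` stands for any initialized
 * XTMutexXSLockRec.
 */
#if 0
	/* Reader: */
	xt_xsmutex_slock(&lock, self->t_id);
	/* ... read shared data ... */
	xt_xsmutex_unlock(&lock, self->t_id);

	/* Writer: */
	xt_xsmutex_xlock(&lock, self->t_id);
	/* ... modify shared data ... */
	xt_xsmutex_unlock(&lock, self->t_id);
#endif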
* -----------------------------------------------------------------------
* RECURSIVE MUTEX (allows lockers to lock again)
#ifdef XT_THREAD_LOCK_INFO
void xt_recursivemutex_init(XTThreadPtr self, XTRecursiveMutexPtr rm, const char *name)
rm->rm_locker = NULL;
rm->rm_lock_count = 0;
xt_init_mutex(self, &rm->rm_mutex, name);
xtPublic void xt_recursivemutex_init(XTThreadPtr self, XTRecursiveMutexPtr rm)
rm->rm_locker = NULL;
rm->rm_lock_count = 0;
xt_init_mutex(self, &rm->rm_mutex);
xtPublic void xt_recursivemutex_free(XTRecursiveMutexPtr rm)
xt_free_mutex(&rm->rm_mutex);
#ifdef XT_THREAD_LOCK_INFO
xt_thread_lock_info_free(&rm->rm_lock_info);
xtPublic void xt_recursivemutex_lock(XTThreadPtr self, XTRecursiveMutexPtr rm)
if (self != rm->rm_locker) {
xt_lock_mutex(self, &rm->rm_mutex);
rm->rm_locker = self;
rm->rm_lock_count++;
xtPublic void xt_recursivemutex_unlock(XTThreadPtr self, XTRecursiveMutexPtr rm)
ASSERT(self == rm->rm_locker);
ASSERT(rm->rm_lock_count > 0);
rm->rm_lock_count--;
if (!rm->rm_lock_count) {
rm->rm_locker = NULL;
xt_unlock_mutex(self, &rm->rm_mutex);
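/* Because rm_lock_count tracks nesting, a thread may lock the recursive mutex
 * more than once and must unlock it the same number of times before the
 * underlying mutex is released. Sketch only; `rm` stands for an initialized
 * recursive mutex:
 */
#if 0
	xt_recursivemutex_lock(self, &rm);		/* acquires rm_mutex, count = 1 */
	xt_recursivemutex_lock(self, &rm);		/* same thread: count = 2, no new acquire */
	xt_recursivemutex_unlock(self, &rm);	/* count = 1, mutex still held */
	xt_recursivemutex_unlock(self, &rm);	/* count = 0, rm_mutex released */
#endif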
* -----------------------------------------------------------------------
* RECURSIVE R/W LOCK (allows X lockers to lock again)
#ifdef XT_THREAD_LOCK_INFO
void xt_recurrwlock_init(struct XTThread *self, XTRecurRWLockPtr rrw, const char *name)
rrw->rrw_locker = NULL;
rrw->rrw_lock_count = 0;
xt_init_rwlock(self, &rrw->rrw_lock, name);
void xt_recurrwlock_init(struct XTThread *self, XTRecurRWLockPtr rrw)
rrw->rrw_locker = NULL;
rrw->rrw_lock_count = 0;
xt_init_rwlock(self, &rrw->rrw_lock);
void xt_recurrwlock_free(XTRecurRWLockPtr rrw)
xt_free_rwlock(&rrw->rrw_lock);
#ifdef XT_THREAD_LOCK_INFO
xt_thread_lock_info_free(&rrw->rrw_lock_info);
void xt_recurrwlock_xlock(struct XTThread *self, XTRecurRWLockPtr rrw)
if (self != rrw->rrw_locker) {
xt_xlock_rwlock(self, &rrw->rrw_lock);
rrw->rrw_locker = self;
rrw->rrw_lock_count++;
void xt_recurrwlock_slock(struct XTThread *self, XTRecurRWLockPtr rrw)
xt_slock_rwlock(self, &rrw->rrw_lock);
void xt_recurrwlock_slock_ns(XTRecurRWLockPtr rrw)
xt_slock_rwlock_ns(&rrw->rrw_lock);
void xt_recurrwlock_unxlock(struct XTThread *self, XTRecurRWLockPtr rrw)
ASSERT(self == rrw->rrw_locker);
ASSERT(rrw->rrw_lock_count > 0);
rrw->rrw_lock_count--;
if (!rrw->rrw_lock_count) {
rrw->rrw_locker = NULL;
xt_unlock_rwlock(self, &rrw->rrw_lock);
void xt_recurrwlock_unslock(struct XTThread *self, XTRecurRWLockPtr rrw)
xt_unlock_rwlock(self, &rrw->rrw_lock);
void xt_recurrwlock_unslock_ns(XTRecurRWLockPtr rrw)
xt_unlock_rwlock_ns(&rrw->rrw_lock);
* -----------------------------------------------------------------------
* UNIT TESTS
#define JOB_MEMCPY 1
#define JOB_INCREMENT 4
#define JOB_SNOOZE 5
#define JOB_DOUBLE_INC 6
#define LOCK_PTHREAD_RW 1
#define LOCK_PTHREAD_MUTEX 2
#define LOCK_XSMUTEX 3
#define LOCK_SPINXSLOCK 4
#define LOCK_SPINLOCK 5
typedef struct XSLockTest {
u_int xs_interations;
xtBool xs_which_lock;
xtBool xs_which_job;
xtBool xs_debug_print;
xt_rwlock_type xs_plock;
xt_mutex_type xs_mutex;
XTMutexXSLockRec xs_fastrwlock;
XTSpinXSLockRec xs_spinrwlock;
XTSpinLockRec xs_spinlock;
} XSLockTestRec, *XSLockTestPtr;
static void lck_free_thread_data(XTThreadPtr XT_UNUSED(self), void *XT_UNUSED(data))
static void lck_do_job(XTThreadPtr self, int job, XSLockTestPtr data, xtBool reader)
char b1[2048], b2[2048];
memcpy(b1, b2, 2048);
xt_sleep_milli_second(1);
printf("- %s got lock\n", self->t_name);
xt_sleep_milli_second(10);
xt_sleep_milli_second(10);
case JOB_DOUBLE_INC:
if ((data->xs_inc & 1) != 0)
static void *lck_run_dumper(XTThreadPtr self)
while (state != 1) {
static void *lck_run_reader(XTThreadPtr self)
XSLockTestRec *data = (XSLockTestRec *) self->t_data;
if (data->xs_debug_print)
printf("- %s start\n", self->t_name);
for (u_int i=0; i<data->xs_interations; i++) {
if (data->xs_progress && ((i+1) % data->xs_progress) == 0)
printf("- %s %d\n", self->t_name, i+1);
if (data->xs_which_lock == LOCK_PTHREAD_RW) {
xt_slock_rwlock_ns(&data->xs_plock);
lck_do_job(self, data->xs_which_job, data, TRUE);
xt_unlock_rwlock_ns(&data->xs_plock);
else if (data->xs_which_lock == LOCK_SPINXSLOCK) {
xt_spinxslock_slock(&data->xs_spinrwlock);
lck_do_job(self, data->xs_which_job, data, TRUE);
xt_spinxslock_unlock(&data->xs_spinrwlock, FALSE);
else if (data->xs_which_lock == LOCK_XSMUTEX) {
xt_xsmutex_slock(&data->xs_fastrwlock, self->t_id);
lck_do_job(self, data->xs_which_job, data, TRUE);
xt_xsmutex_unlock(&data->xs_fastrwlock, self->t_id);
if (data->xs_debug_print)
printf("- %s stop\n", self->t_name);
static void *lck_run_writer(XTThreadPtr self)
XSLockTestRec *data = (XSLockTestRec *) self->t_data;
if (data->xs_debug_print)
printf("- %s start\n", self->t_name);
for (u_int i=0; i<data->xs_interations; i++) {
if (data->xs_progress && ((i+1) % data->xs_progress) == 0)
printf("- %s %d\n", self->t_name, i+1);
if (data->xs_which_lock == LOCK_PTHREAD_RW) {
xt_xlock_rwlock_ns(&data->xs_plock);
lck_do_job(self, data->xs_which_job, data, FALSE);
xt_unlock_rwlock_ns(&data->xs_plock);
else if (data->xs_which_lock == LOCK_SPINXSLOCK) {
xt_spinxslock_xlock(&data->xs_spinrwlock, FALSE, self->t_id);
lck_do_job(self, data->xs_which_job, data, FALSE);
xt_spinxslock_unlock(&data->xs_spinrwlock, TRUE);
else if (data->xs_which_lock == LOCK_XSMUTEX) {
xt_xsmutex_xlock(&data->xs_fastrwlock, self->t_id);
lck_do_job(self, data->xs_which_job, data, FALSE);
xt_xsmutex_unlock(&data->xs_fastrwlock, self->t_id);
if (data->xs_debug_print)
printf("- %s stop\n", self->t_name);
static void lck_print_test(XSLockTestRec *data)
switch (data->xs_which_lock) {
case LOCK_PTHREAD_RW:
printf("pthread read/write");
case LOCK_PTHREAD_MUTEX:
printf("pthread mutex");
printf("spin mutex");
case LOCK_SPINXSLOCK:
printf("spin read/write lock");
printf("fast x/s mutex");
switch (data->xs_which_job) {
printf(" MEMCPY 2K");
printf(" SLEEP 1/1000s");
printf(" PRINT DEBUG");
printf(" INCREMENT");
printf(" SLEEP 1/100s");
printf(" %d iterations", data->xs_interations);
static void *lck_run_mutex_locker(XTThreadPtr self)
XSLockTestRec *data = (XSLockTestRec *) self->t_data;
if (data->xs_debug_print)
printf("- %s start\n", self->t_name);
for (u_int i=0; i<data->xs_interations; i++) {
if (data->xs_progress && ((i+1) % data->xs_progress) == 0)
printf("- %s %d\n", self->t_name, i+1);
if (data->xs_which_lock == LOCK_PTHREAD_MUTEX) {
xt_lock_mutex_ns(&data->xs_mutex);
lck_do_job(self, data->xs_which_job, data, FALSE);
xt_unlock_mutex_ns(&data->xs_mutex);
else if (data->xs_which_lock == LOCK_SPINLOCK) {
xt_spinlock_lock(&data->xs_spinlock);
lck_do_job(self, data->xs_which_job, data, FALSE);
xt_spinlock_unlock(&data->xs_spinlock);
if (data->xs_debug_print)
printf("- %s stop\n", self->t_name);
typedef struct LockThread {
} LockThreadRec, *LockThreadPtr;
static void lck_reader_writer_test(XTThreadPtr self, XSLockTestRec *data, int reader_cnt, int writer_cnt)
LockThreadPtr threads;
int thread_cnt = reader_cnt + writer_cnt;
//XTThreadPtr dumper = xt_create_daemon(self, "DUMPER");
//xt_run_thread(self, dumper, lck_run_dumper);
printf("READ/WRITE TEST: ");
lck_print_test(data);
printf(", %d readers, %d writers\n", reader_cnt, writer_cnt);
threads = (LockThreadPtr) xt_malloc(self, thread_cnt * sizeof(LockThreadRec));
for (int i=0; i<thread_cnt; i++) {
sprintf(buffer, "%s%d", i < reader_cnt ? "READER-" : "WRITER-", i+1);
threads[i].ptr = xt_create_daemon(self, buffer);
threads[i].id = threads[i].ptr->t_id;
xt_set_thread_data(threads[i].ptr, data, lck_free_thread_data);
start = xt_trace_clock();
for (int i=0; i<reader_cnt; i++)
xt_run_thread(self, threads[i].ptr, lck_run_reader);
for (int i=reader_cnt; i<thread_cnt; i++)
xt_run_thread(self, threads[i].ptr, lck_run_writer);
for (int i=0; i<thread_cnt; i++)
xt_wait_for_thread_to_exit(threads[i].id, TRUE);
printf("----- %d reader, %d writer time=%s\n", reader_cnt, writer_cnt, xt_trace_clock_diff(buffer, start));
xt_free(self, threads);
printf("TEST RESULT = %d\n", data->xs_inc);
//xt_wait_for_thread_to_exit(dumper, TRUE);
static void lck_mutex_lock_test(XTThreadPtr self, XSLockTestRec *data, int thread_cnt)
LockThreadPtr threads;
printf("LOCK MUTEX TEST: ");
lck_print_test(data);
printf(", %d threads\n", thread_cnt);
threads = (LockThreadPtr) xt_malloc(self, thread_cnt * sizeof(LockThreadRec));
for (int i=0; i<thread_cnt; i++) {
sprintf(buffer, "THREAD%d", i+1);
threads[i].ptr = xt_create_daemon(self, buffer);
threads[i].id = threads[i].ptr->t_id;
xt_set_thread_data(threads[i].ptr, data, lck_free_thread_data);
start = xt_trace_clock();
for (int i=0; i<thread_cnt; i++)
xt_run_thread(self, threads[i].ptr, lck_run_mutex_locker);
for (int i=0; i<thread_cnt; i++)
xt_wait_for_thread_to_exit(threads[i].id, TRUE);
printf("----- %d threads time=%s\n", thread_cnt, xt_trace_clock_diff(buffer, start));
xt_free(self, threads);
printf("TEST RESULT = %d\n", data->xs_inc);
xtPublic void xt_unit_test_read_write_locks(XTThreadPtr self)
memset(&data, 0, sizeof(data));
printf("TEST: xt_unit_test_read_write_locks\n");
printf("size of XTMutexXSLockRec = %d\n", (int) sizeof(XTMutexXSLockRec));
printf("size of pthread_cond_t = %d\n", (int) sizeof(pthread_cond_t));
printf("size of pthread_mutex_t = %d\n", (int) sizeof(pthread_mutex_t));
xt_init_rwlock_with_autoname(self, &data.xs_plock);
xt_spinxslock_init_with_autoname(self, &data.xs_spinrwlock);
xt_xsmutex_init_with_autoname(self, &data.xs_fastrwlock);
data.xs_interations = 10;
data.xs_which_lock = LOCK_XSMUTEX; // LOCK_PTHREAD_RW, LOCK_SPINXSLOCK, LOCK_XSMUTEX (value was missing; LOCK_XSMUTEX used as a placeholder choice)
data.xs_which_job = JOB_PRINT;
data.xs_debug_print = TRUE;
data.xs_progress = 0;
lck_reader_writer_test(self, &data, 4, 0);
lck_reader_writer_test(self, &data, 0, 2);
lck_reader_writer_test(self, &data, 1, 1);
lck_reader_writer_test(self, &data, 4, 2);
data.xs_interations = 4000;
data.xs_which_lock = LOCK_XSMUTEX; // LOCK_PTHREAD_RW, LOCK_SPINXSLOCK, LOCK_XSMUTEX (value was missing; LOCK_XSMUTEX used as a placeholder choice)
data.xs_which_job = JOB_SLEEP;
data.xs_debug_print = TRUE;
data.xs_progress = 200;
lck_reader_writer_test(self, &data, 4, 0);
lck_reader_writer_test(self, &data, 0, 2);
lck_reader_writer_test(self, &data, 1, 1);
lck_reader_writer_test(self, &data, 4, 2);
// LOCK_PTHREAD_RW, LOCK_SPINXSLOCK, LOCK_XSMUTEX
data.xs_interations = 100000;
data.xs_which_lock = LOCK_XSMUTEX;
data.xs_which_job = JOB_DOUBLE_INC; // JOB_INCREMENT, JOB_DOUBLE_INC
data.xs_debug_print = FALSE;
data.xs_progress = 0;
lck_reader_writer_test(self, &data, 10, 0);
data.xs_which_lock = LOCK_XSMUTEX;
lck_reader_writer_test(self, &data, 10, 0);
//lck_reader_writer_test(self, &data, 0, 5);
//lck_reader_writer_test(self, &data, 10, 0);
//lck_reader_writer_test(self, &data, 10, 5);
data.xs_interations = 10000;
data.xs_which_lock = LOCK_XSMUTEX;
data.xs_which_job = JOB_MEMCPY;
data.xs_debug_print = FALSE;
data.xs_progress = 0;
lck_reader_writer_test(self, &data, 10, 0);
data.xs_which_lock = LOCK_XSMUTEX;
lck_reader_writer_test(self, &data, 10, 0);
//lck_reader_writer_test(self, &data, 0, 5);
//lck_reader_writer_test(self, &data, 10, 0);
//lck_reader_writer_test(self, &data, 10, 5);
data.xs_interations = 1000;
data.xs_which_lock = LOCK_XSMUTEX;
data.xs_which_job = JOB_SLEEP; // JOB_SLEEP, JOB_SNOOZE
data.xs_debug_print = FALSE;
data.xs_progress = 0;
lck_reader_writer_test(self, &data, 10, 0);
data.xs_which_lock = LOCK_XSMUTEX;
lck_reader_writer_test(self, &data, 10, 0);
xt_free_rwlock(&data.xs_plock);
xt_spinxslock_free(self, &data.xs_spinrwlock);
xt_xsmutex_free(self, &data.xs_fastrwlock);
xtPublic void xt_unit_test_mutex_locks(XTThreadPtr self)
memset(&data, 0, sizeof(data));
printf("TEST: xt_unit_test_mutex_locks\n");
xt_spinlock_init_with_autoname(self, &data.xs_spinlock);
xt_init_mutex_with_autoname(self, &data.xs_mutex);
data.xs_interations = 10;
data.xs_which_lock = LOCK_SPINLOCK; // LOCK_SPINLOCK, LOCK_PTHREAD_MUTEX
data.xs_which_job = JOB_PRINT;
data.xs_debug_print = TRUE;
data.xs_progress = 0;
lck_mutex_lock_test(self, &data, 2);
data.xs_interations = 100000;
data.xs_which_lock = LOCK_SPINLOCK; // LOCK_SPINLOCK, LOCK_PTHREAD_MUTEX
data.xs_which_job = JOB_INCREMENT;
data.xs_debug_print = FALSE;
data.xs_progress = 0;
lck_mutex_lock_test(self, &data, 10);
data.xs_interations = 10000;
data.xs_which_lock = LOCK_SPINLOCK; // LOCK_SPINLOCK, LOCK_PTHREAD_MUTEX
data.xs_which_job = JOB_MEMCPY;
data.xs_debug_print = FALSE;
data.xs_progress = 0;
lck_mutex_lock_test(self, &data, 10);
data.xs_interations = 1000;
data.xs_which_lock = LOCK_SPINLOCK; // LOCK_SPINLOCK, LOCK_PTHREAD_MUTEX
data.xs_which_job = JOB_SLEEP;
data.xs_debug_print = FALSE;
data.xs_progress = 0;
lck_mutex_lock_test(self, &data, 10);
data.xs_interations = 100;
data.xs_which_lock = LOCK_SPINLOCK; // LOCK_SPINLOCK, LOCK_PTHREAD_MUTEX
data.xs_which_job = JOB_SNOOZE;
data.xs_debug_print = FALSE;
data.xs_progress = 0;
lck_mutex_lock_test(self, &data, 10);
xt_spinlock_free(self, &data.xs_spinlock);
xt_free_mutex(&data.xs_mutex);
xtPublic void xt_unit_test_create_threads(XTThreadPtr self)
XTThreadPtr threads[10];
printf("TEST: xt_unit_test_create_threads\n");
printf("current max threads = %d, in use = %d\n", xt_thr_current_max_threads, xt_thr_current_thread_count);
/* Create some threads: */
threads[0] = xt_create_daemon(self, "test0");
printf("thread = %d\n", threads[0]->t_id);
threads[1] = xt_create_daemon(self, "test1");
printf("thread = %d\n", threads[1]->t_id);
threads[2] = xt_create_daemon(self, "test2");
printf("thread = %d\n", threads[2]->t_id);
threads[3] = xt_create_daemon(self, "test3");
printf("thread = %d\n", threads[3]->t_id);
threads[4] = xt_create_daemon(self, "test4");
printf("thread = %d\n", threads[4]->t_id);
printf("current max threads = %d, in use = %d\n", xt_thr_current_max_threads, xt_thr_current_thread_count);
/* Max stays the same: */
xt_free_thread(threads[3]);
xt_free_thread(threads[2]);
xt_free_thread(threads[1]);
printf("current max threads = %d, in use = %d\n", xt_thr_current_max_threads, xt_thr_current_thread_count);
/* Fill in the gaps: */
threads[1] = xt_create_daemon(self, "test1");
printf("thread = %d\n", threads[1]->t_id);
threads[2] = xt_create_daemon(self, "test2");
printf("thread = %d\n", threads[2]->t_id);
threads[3] = xt_create_daemon(self, "test3");
printf("thread = %d\n", threads[3]->t_id);
printf("current max threads = %d, in use = %d\n", xt_thr_current_max_threads, xt_thr_current_thread_count);
threads[5] = xt_create_daemon(self, "test5");
printf("thread = %d\n", threads[5]->t_id);
printf("current max threads = %d, in use = %d\n", xt_thr_current_max_threads, xt_thr_current_thread_count);
/* Max stays the same: */
xt_free_thread(threads[3]);
xt_free_thread(threads[2]);
xt_free_thread(threads[1]);
xt_free_thread(threads[4]);
printf("current max threads = %d, in use = %d\n", xt_thr_current_max_threads, xt_thr_current_thread_count);
/* Recalculate the max: */
xt_free_thread(threads[5]);
printf("current max threads = %d, in use = %d\n", xt_thr_current_max_threads, xt_thr_current_thread_count);
/* Fill in the gaps: */
threads[1] = xt_create_daemon(self, "test1");
printf("thread = %d\n", threads[1]->t_id);
threads[2] = xt_create_daemon(self, "test2");
printf("thread = %d\n", threads[2]->t_id);
threads[3] = xt_create_daemon(self, "test3");
printf("thread = %d\n", threads[3]->t_id);
printf("current max threads = %d, in use = %d\n", xt_thr_current_max_threads, xt_thr_current_thread_count);
xt_free_thread(threads[3]);
xt_free_thread(threads[2]);
xt_free_thread(threads[1]);
xt_free_thread(threads[0]);
printf("current max threads = %d, in use = %d\n", xt_thr_current_max_threads, xt_thr_current_thread_count);
int XTRowLocks::xt_release_locks(struct XTOpenTable *ot, xtRowID row, XTRowLockListPtr lock_list)
if (ot->ot_temp_row_lock)
xt_make_lock_permanent(ot, lock_list);
if (!lock_list->bl_count)
xtTableID tab_id, ptab_id;
XTPermRowLockPtr plock;
XTOpenTablePtr pot = NULL;
XTRowLocksPtr row_locks;
/* Do I have the lock? */
group = row % XT_ROW_LOCK_COUNT;
if (!(xact = tab_row_locks[group]))
/* There is no lock: */
if (xact != ot->ot_thread->st_xact_data)
/* There is a lock but it does not belong to me! */
tab_id = ot->ot_table->tab_id;
plock = (XTPermRowLockPtr) &lock_list->bl_data[lock_list->bl_count * lock_list->bl_item_size];
lock_list->rll_release_point = lock_list->bl_count;
for (u_int i=0; i<lock_list->bl_count; i++) {
pgroup = plock->pr_group;
ptab_id = plock->pr_tab_id;
if (ptab_id == tab_id)
if (pot->ot_table->tab_id == ptab_id)
xt_db_return_table_to_pool_ns(pot);
if (!xt_db_open_pool_table_ns(&pot, ot->ot_table->tab_db, tab_id)) {
/* Should not happen, but just in case, we just don't
* remove the lock. We will probably end up with a deadlock
xt_log_and_clear_exception_ns();
goto skip_remove_lock;
/* Can happen if the table has been dropped: */
goto skip_remove_lock;
row_locks = &pot->ot_table->tab_locks;
#ifdef XT_TRACE_LOCKS
xt_ttracef(xt_get_self(), "release lock group=%d\n", pgroup);
row_locks->tab_row_locks[pgroup] = NULL;
row_locks->tab_lock_perm[pgroup] = 0;
lock_list->rll_release_point--;
if (tab_id == ptab_id && group == pgroup)
xt_db_return_table_to_pool_ns(pot);
return XT_PERM_LOCK;
xtBool XTRowLocks::xt_regain_locks(struct XTOpenTable *ot, int *lock_type, xtXactID *xn_id, XTRowLockListPtr lock_list)
XTXactDataPtr xact, my_xact;
XTPermRowLockPtr plock;
XTOpenTablePtr pot = NULL;
XTRowLocksPtr row_locks = NULL;
XTTableHPtr tab = NULL;
for (u_int i=lock_list->rll_release_point; i<lock_list->bl_count; i++) {
plock = (XTPermRowLockPtr) &lock_list->bl_data[i * lock_list->bl_item_size];
my_xact = ot->ot_thread->st_xact_data;
group = plock->pr_group;
tab_id = plock->pr_tab_id;
if (tab_id == ot->ot_table->tab_id) {
if (tab_id == pot->ot_table->tab_id)
xt_db_return_table_to_pool_ns(pot);
if (!xt_db_open_pool_table_ns(&pot, ot->ot_table->tab_db, tab_id))
tab = pot->ot_table;
row_locks = &tab->tab_locks;
#ifdef XT_TRACE_LOCKS
xt_ttracef(xt_get_self(), "regain lock group=%d\n", group);
XT_TAB_ROW_WRITE_LOCK(&tab->tab_row_rwlock[group % XT_ROW_RWLOCKS], ot->ot_thread);
if ((xact = row_locks->tab_row_locks[group])) {
if (xact != my_xact) {
*xn_id = xact->xd_start_xn_id;
*lock_type = row_locks->tab_lock_perm[group] ? XT_PERM_LOCK : XT_TEMP_LOCK;
row_locks->tab_row_locks[group] = my_xact;
row_locks->tab_lock_perm[group] = 1;
XT_TAB_ROW_UNLOCK(&tab->tab_row_rwlock[group % XT_ROW_RWLOCKS], ot->ot_thread);
lock_list->rll_release_point++;
*lock_type = XT_NO_LOCK;
XT_TAB_ROW_UNLOCK(&tab->tab_row_rwlock[group % XT_ROW_RWLOCKS], ot->ot_thread);