~drizzle-trunk/drizzle/development

Viewing changes to plugin/pbxt/src/lock_xt.cc

  • Committer: Monty Taylor
  • Date: 2008-11-16 05:36:13 UTC
  • mto: (584.1.9 devel)
  • mto: This revision was merged to the branch mainline in revision 589.
  • Revision ID: monty@inaugust.com-20081116053613-bld4rqxhlkb49c02
Split out cache_row and type_holder.

/* Copyright (C) 2005 PrimeBase Technologies GmbH
 *
 * PrimeBase XT
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * 2008-01-24   Paul McCullagh
 *
 * Row lock functions.
 *
 * H&G2JCtL
 */

#include "xt_config.h"

#ifdef DRIZZLED
#include <bitset>
#endif

#include <stdio.h>

#include "lock_xt.h"
#include "thread_xt.h"
#include "table_xt.h"
#include "xaction_xt.h"
#include "database_xt.h"
#include "trace_xt.h"

#ifdef DEBUG
//#define XT_TRACE_LOCKS
//#define CHECK_ROWLOCK_GROUP_CONSISTENCY
#endif
 
/*
 * This function should never be called. It indicates a link
 * error!
 */
xtPublic void xt_log_atomic_error_and_abort(c_char *func, c_char *file, u_int line)
{
        xt_logf(NULL, func, file, line, XT_LOG_ERROR, "%s", "Atomic operations not supported\n");
        abort();
}
 
/*
 * -----------------------------------------------------------------------
 * ROW LOCKS, LIST BASED
 */
#ifdef XT_USE_LIST_BASED_ROW_LOCKS

#ifdef CHECK_ROWLOCK_GROUP_CONSISTENCY
/*
 * Requires a spin-lock on group->lg_lock!
 */
static void check_rowlock_group(XTLockGroupPtr group)
{
        XTThreadPtr self = xt_get_self();

        char *crash = NULL;

        if (group->lg_lock.spl_locker != self)
                *crash = 1;

        if (group->lg_list_in_use > group->lg_list_size)
                *crash = 1;

        xtRowID prev_row = 0;
        XTLockItemPtr item = group->lg_list;

        for (int i = 0; i < group->lg_list_in_use; i++, item++) {

                if (!item->li_thread_id)
                        *crash = 1;

                THR_ARRAY_READ_LOCK(&xt_thr_array_resize_lock, self->t_id);
                if (!xt_thr_array[item->li_thread_id]->st_xact_data)
                        *crash = 1;
                THR_ARRAY_UNLOCK(&xt_thr_array_resize_lock, self->t_id);

                if (item->li_count > XT_TEMP_LOCK_BYTES)
                        *crash = 1;

                // Row IDs per thread must obey the row_id > prev_row_id + prev_count*group_size rule:
                if (prev_row >= item->li_row_id)
                        *crash = 1;

                // Calculate the new previous row:
                if (item->li_count < XT_TEMP_LOCK_BYTES)
                        prev_row = item->li_row_id + (item->li_count - 1) * XT_ROW_LOCK_GROUP_COUNT;
                else
                        prev_row = item->li_row_id;
        }
}
#endif
 
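/* Illustrative worked example (editor's note, not part of the original
 * source): each lock group holds a sorted list of XTLockItemRec entries,
 * and an entry with li_count < XT_TEMP_LOCK_BYTES covers the rows
 * li_row_id, li_row_id + XT_ROW_LOCK_GROUP_COUNT, ...,
 * li_row_id + (li_count - 1) * XT_ROW_LOCK_GROUP_COUNT, i.e. consecutive
 * rows that hash to the same group. An item with
 * li_count == XT_TEMP_LOCK_BYTES is a temporary lock covering a single
 * row. For example, assuming XT_ROW_LOCK_GROUP_COUNT is 8, an item with
 * li_row_id = 3 and li_count = 4 covers rows 3, 11, 19 and 27, so the
 * next item owned by the same list must have li_row_id > 27, which is
 * exactly the invariant checked above.
 */
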
static int xlock_cmp_row_ids(XTThreadPtr XT_UNUSED(self), register const void *XT_UNUSED(thunk), register const void *a, register const void *b)
{
        xtRowID         row_id = *((xtRowID *) a);
        XTLockItemPtr   item = (XTLockItemPtr) b;

        if (row_id < item->li_row_id)
                return -1;
        if (row_id > item->li_row_id)
                return 1;
        return 0;
}
 
void XTRowLockList::xt_remove_all_locks(struct XTDatabase *db, XTThreadPtr thread)
{
#ifdef XT_TRACE_LOCKS
        xt_ttracef(xt_get_self(), "remove all locks\n");
#endif
        if (!bl_count)
                return;

        xtThreadID              thd_id;
        XTPermRowLockPtr        plock;
#ifndef XT_USE_TABLE_REF
        XTOpenTablePtr          pot = NULL;
#endif

        thd_id = thread->t_id;
        plock = (XTPermRowLockPtr) bl_data;
        for (u_int i=0; i<bl_count; i++) {
#ifdef XT_USE_TABLE_REF
                XTTableHPtr             tab = plock->pr_table;
#else
                if (!xt_db_open_pool_table_ns(&pot, db, plock->pr_tab_id)) {
                        /* Should not happen, but just in case, we just don't
                         * remove the lock. We will probably end up with a deadlock
                         * somewhere.
                         */
                        xt_log_and_clear_exception_ns();
                }
                else {
#endif
                        for (int j=0; j<XT_ROW_LOCK_GROUP_COUNT; j++) {
                                if (plock->pr_group[j]) {
                                        /* Go through group j and compact. */
#ifndef XT_USE_TABLE_REF
                                        XTTableHPtr             tab = pot->ot_table;
#endif
                                        XTLockGroupPtr  group;
                                        XTLockItemPtr   copy;
                                        XTLockItemPtr   item;
                                        int             new_count;

                                        group = &tab->tab_locks.rl_groups[j];
                                        xt_spinlock_lock(&group->lg_lock);
                                        copy = group->lg_list;
                                        item = group->lg_list;
                                        new_count = 0;
                                        for (size_t k=0; k<group->lg_list_in_use; k++) {
                                                if (item->li_thread_id != thd_id) {
                                                        if (copy != item) {
                                                                copy->li_row_id = item->li_row_id;
                                                                copy->li_count = item->li_count;
                                                                copy->li_thread_id = item->li_thread_id;
                                                        }
                                                        new_count++;
                                                        copy++;
                                                }
#ifdef XT_TRACE_LOCKS
                                                else {
                                                        if (item->li_count == XT_TEMP_LOCK_BYTES)
                                                                xt_ttracef(xt_get_self(), "remove group %d lock row_id=%d TEMP\n", j, (int) item->li_row_id);
                                                        else
                                                                xt_ttracef(xt_get_self(), "remove group %d locks row_id=%d (%d)\n", j, (int) item->li_row_id, (int) item->li_count);
                                                }
#endif
                                                item++;
                                        }
                                        group->lg_list_in_use = new_count;
#ifdef CHECK_ROWLOCK_GROUP_CONSISTENCY
                                        check_rowlock_group(group);
#endif
                                        if (group->lg_wait_queue)
                                                tab->tab_locks.rl_grant_locks(group, thread);

                                        xt_spinlock_unlock(&group->lg_lock);

                                        xt_wakeup_thread_list(thread);
                                }
                        }
#ifdef XT_USE_TABLE_REF
                        xt_heap_release_ns(plock->pr_table);
#else
                        xt_db_return_table_to_pool_ns(pot);
                }
#endif
                plock++;
        }
        bl_count = 0;
}
 
#ifdef DEBUG_LOCK_QUEUE
int *dummy_ptr = 0;

void XTRowLocks::rl_check(XTLockWaitPtr no_lw)
{
        XTLockGroupPtr  group;
        XTLockWaitPtr   lw, lw_prev;

        for (int i=0; i<XT_ROW_LOCK_GROUP_COUNT; i++) {
                group = &rl_groups[i];
                xt_spinlock_lock(&group->lg_lock);

                lw = group->lg_wait_queue;
                lw_prev = NULL;
                while (lw) {
                        if (lw == no_lw)
                                *dummy_ptr = 1;
                        if (lw->lw_prev != lw_prev)
                                *dummy_ptr = 2;
                        lw_prev = lw;
                        lw = lw->lw_next;
                }
                xt_spinlock_unlock(&group->lg_lock);
        }
}
#endif
 
// Depending on platform 'thread->t_id' may not be used by THR_ARRAY_READ_LOCK().
xtBool XTRowLocks::rl_lock_row(XTLockGroupPtr group, XTLockWaitPtr lw, XTRowLockListPtr, int *result, XTThreadPtr thread __attribute__((unused)))
{
        XTLockItemPtr   item;
        size_t          index;
        xtRowID         row_id = lw->lw_row_id;

#ifdef CHECK_ROWLOCK_GROUP_CONSISTENCY
        check_rowlock_group(group);
#endif
        if (group->lg_list_size == group->lg_list_in_use) {
                if (!xt_realloc_ns((void **) &group->lg_list, (group->lg_list_size + 2) * sizeof(XTLockItemRec)))
                        return FAILED;
                group->lg_list_size += 2;
        }
        item = (XTLockItemPtr) xt_bsearch(NULL, &row_id, group->lg_list, group->lg_list_in_use, sizeof(XTLockItemRec), &index, NULL, xlock_cmp_row_ids);

        /* There is no item with this ID, but there could be an item with a range that covers this row: */
        if (!item && group->lg_list_in_use) {
                if (index > 0) {
                        int count;

                        item = group->lg_list + index - 1;

                        count = item->li_count;
                        if (item->li_count == XT_TEMP_LOCK_BYTES)
                                count = 1;

                        if (row_id >= item->li_row_id + count * XT_ROW_LOCK_GROUP_COUNT)
                                item = NULL;
                }
        }

        if (item) {
                XTThreadPtr tmp_thr;

                /* Item already exists. */
                if (item->li_thread_id == lw->lw_thread->t_id) {
                        /* Already have a permanent lock: */
                        *result = XT_NO_LOCK;
                        lw->lw_curr_lock = XT_NO_LOCK;
                        return OK;
                }
                /* {REMOVE-LOCKS}
                 * This must be valid, because a thread must remove
                 * its locks before it frees its st_xact_data structure.
                 * The xt_thr_array entry must also be valid, because
                 * the transaction must be ended before the thread is
                 * killed.
                 */
                *result = item->li_count == XT_TEMP_LOCK_BYTES ? XT_TEMP_LOCK : XT_PERM_LOCK;
                THR_ARRAY_READ_LOCK(&xt_thr_array_resize_lock, thread->t_id);
                tmp_thr = xt_thr_array[item->li_thread_id].td_thread;
                THR_ARRAY_UNLOCK(&xt_thr_array_resize_lock, thread->t_id);
                lw->lw_xn_id = tmp_thr->st_xact_data->xd_start_xn_id;
                lw->lw_curr_lock = *result;
                return OK;
        }

        /* Add the lock: */
        XT_MEMMOVE(group->lg_list, &group->lg_list[index+1],
                &group->lg_list[index], (group->lg_list_in_use - index) * sizeof(XTLockItemRec));
        group->lg_list[index].li_row_id = row_id;
        group->lg_list[index].li_count = XT_TEMP_LOCK_BYTES;
        group->lg_list[index].li_thread_id = lw->lw_thread->t_id;
        group->lg_list_in_use++;

#ifdef XT_TRACE_LOCKS
        xt_ttracef(lw->lw_ot->ot_thread, "set temp lock row=%d setby=%s\n", (int) row_id, xt_get_self()->t_name);
#endif
#ifdef CHECK_ROWLOCK_GROUP_CONSISTENCY
        check_rowlock_group(group);
#endif
        *result = XT_NO_LOCK;
        lw->lw_ot->ot_temp_row_lock = row_id;
        lw->lw_curr_lock = XT_NO_LOCK;
        return OK;
}
 
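/* Editor's sketch (not part of the original source): the covering test
 * used in rl_lock_row() above, written as a stand-alone predicate. An
 * item covers a row if the row lies within li_count strides of
 * XT_ROW_LOCK_GROUP_COUNT from li_row_id; a temporary lock
 * (li_count == XT_TEMP_LOCK_BYTES) covers a single row. Rows in one
 * group list always satisfy row_id % XT_ROW_LOCK_GROUP_COUNT == group
 * index, so once the sorted position is known only the upper bound
 * matters.
 */
#if 0
static xtBool row_covered_by_item(xtRowID row_id, XTLockItemPtr item)
{
        int count = (item->li_count == XT_TEMP_LOCK_BYTES) ? 1 : item->li_count;

        /* The item covers li_row_id, li_row_id + GROUP_COUNT, ...,
         * li_row_id + (count - 1) * GROUP_COUNT: */
        return row_id >= item->li_row_id &&
                row_id < item->li_row_id + count * XT_ROW_LOCK_GROUP_COUNT;
}
#endif
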
void XTRowLocks::rl_grant_locks(XTLockGroupPtr group, XTThreadPtr thread)
{
        XTLockWaitPtr   lw, lw_next, lw_prev;
        int             result;
        xtThreadID      lw_thd_id;

        thread->st_thread_list_count = 0;
        lw = group->lg_wait_queue;
        while (lw) {
                lw_next = lw->lw_next;
                lw_prev = lw->lw_prev;
                lw_thd_id = lw->lw_thread->t_id;
                /* NOTE: after lw_curr_lock is changed, lw may no longer be referenced
                 * by this function!!!
                 */
                if (!rl_lock_row(group, lw, &lw->lw_thread->st_lock_list, &result, thread)) {
                        /* We transfer the error to the other thread! */
                        XTThreadPtr self = xt_get_self();

                        result = XT_LOCK_ERR;
                        memcpy(&lw->lw_thread->t_exception, &self->t_exception, sizeof(XTExceptionRec));
                        lw->lw_curr_lock = XT_LOCK_ERR;
                }
                if (result == XT_NO_LOCK || result == XT_LOCK_ERR) {
                        /* Remove from the wait queue: */
                        if (lw_next)
                                lw_next->lw_prev = lw_prev;
                        if (lw_prev)
                                lw_prev->lw_next = lw_next;
                        if (group->lg_wait_queue == lw)
                                group->lg_wait_queue = lw_next;
                        if (group->lg_wait_queue_end == lw)
                                group->lg_wait_queue_end = lw_prev;
                        if (result == XT_NO_LOCK) {
                                /* Add to the thread list: */
                                if (thread->st_thread_list_count == thread->st_thread_list_size) {
                                        if (!xt_realloc_ns((void **) &thread->st_thread_list, (thread->st_thread_list_size+1) * sizeof(xtThreadID))) {
                                                xt_wakeup_thread(lw_thd_id, thread);
                                                goto done;
                                        }
                                        thread->st_thread_list_size++;
                                }
                                thread->st_thread_list[thread->st_thread_list_count] = lw_thd_id;
                                thread->st_thread_list_count++;
                                done:;
                        }
                }
                lw = lw_next;
        }
}
 
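/* Editor's sketch (not part of the original source): rl_grant_locks()
 * runs with group->lg_lock held, so granted waiters are not woken
 * directly; their thread IDs are collected in thread->st_thread_list
 * and woken only after the spin-lock is dropped, mirroring the pattern
 * used in xt_remove_all_locks() above:
 */
#if 0
        xt_spinlock_lock(&group->lg_lock);
        tab->tab_locks.rl_grant_locks(group, thread);   /* collect granted thread IDs */
        xt_spinlock_unlock(&group->lg_lock);
        xt_wakeup_thread_list(thread);                  /* wake them outside the lock */
#endif
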
void XTRowLocks::xt_cancel_temp_lock(XTLockWaitPtr lw)
{
        XTLockGroupPtr  group;

        group = &rl_groups[lw->lw_row_id % XT_ROW_LOCK_GROUP_COUNT];
        xt_spinlock_lock(&group->lg_lock);
        if (lw->lw_curr_lock == XT_TEMP_LOCK || lw->lw_curr_lock == XT_PERM_LOCK) {
                /* In case of XT_LOCK_ERR or XT_NO_LOCK, the lw structure will
                 * no longer be on the wait queue.
                 */
                XTLockWaitPtr   lw_next, lw_prev;

                lw_next = lw->lw_next;
                lw_prev = lw->lw_prev;

                /* Remove from the wait queue: */
                if (lw_next)
                        lw_next->lw_prev = lw_prev;
                if (lw_prev)
                        lw_prev->lw_next = lw_next;
                if (group->lg_wait_queue == lw)
                        group->lg_wait_queue = lw_next;
                if (group->lg_wait_queue_end == lw)
                        group->lg_wait_queue_end = lw_prev;
        }
        xt_spinlock_unlock(&group->lg_lock);
}
 
//#define QUEUE_ORDER_FIFO

/* Try to lock a row.
 * This function returns:
 * XT_NO_LOCK on success.
 * XT_TEMP_LOCK if there is a temporary lock on the row.
 * XT_PERM_LOCK if there is a permanent lock on the row.
 * XT_FAILED if an error occurred.
 *
 * If there is a lock on this row, the transaction ID of the
 * locker is also returned.
 *
 * The caller must wait if the row is locked. If the lock is
 * permanent, then the caller must wait for the transaction to
 * terminate. If the lock is temporary, then the caller must
 * wait for the transaction to signal that the lock has been
 * released.
 */
xtBool XTRowLocks::xt_set_temp_lock(XTOpenTablePtr ot, XTLockWaitPtr lw, XTRowLockListPtr lock_list)
{
        XTLockGroupPtr  group;
        int             result;

        if (ot->ot_temp_row_lock) {
                /* Check whether we already have this temp lock: */
                if (ot->ot_temp_row_lock == lw->lw_row_id) {
                        lw->lw_curr_lock = XT_NO_LOCK;
                        return OK;
                }

                xt_make_lock_permanent(ot, lock_list);
        }

        /* Add a temporary lock. */
        group = &rl_groups[lw->lw_row_id % XT_ROW_LOCK_GROUP_COUNT];
        xt_spinlock_lock(&group->lg_lock);

        if (!rl_lock_row(group, lw, lock_list, &result, ot->ot_thread)) {
                xt_spinlock_unlock(&group->lg_lock);
                return FAILED;
        }

        if (result != XT_NO_LOCK) {
                /* Add the thread to the wait queue: */
#ifdef QUEUE_ORDER_FIFO
                if (group->lg_wait_queue_end) {
                        group->lg_wait_queue_end->lw_next = lw;
                        lw->lw_prev = group->lg_wait_queue_end;
                }
                else {
                        group->lg_wait_queue = lw;
                        lw->lw_prev = NULL;
                }
                lw->lw_next = NULL;
                group->lg_wait_queue_end = lw;
#else
                /* Queue order: older transactions (lower start IDs) go nearer the front. */
                XTLockWaitPtr   pos = group->lg_wait_queue_end;
                xtXactID        xn_id = ot->ot_thread->st_xact_data->xd_start_xn_id;

                while (pos) {
                        if (pos->lw_thread->st_xact_data->xd_start_xn_id < xn_id)
                                break;
                        pos = pos->lw_prev;
                }
                if (pos) {
                        lw->lw_prev = pos;
                        lw->lw_next = pos->lw_next;
                        if (pos->lw_next)
                                pos->lw_next->lw_prev = lw;
                        else
                                group->lg_wait_queue_end = lw;
                        pos->lw_next = lw;
                }
                else {
                        /* Front of the queue: */
                        lw->lw_prev = NULL;
                        lw->lw_next = group->lg_wait_queue;
                        if (group->lg_wait_queue)
                                group->lg_wait_queue->lw_prev = lw;
                        else
                                group->lg_wait_queue_end = lw;
                        group->lg_wait_queue = lw;
                }
#endif
        }

        xt_spinlock_unlock(&group->lg_lock);
        return OK;
}
 
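/* Editor's sketch (not part of the original source): how a caller might
 * drive the wait protocol described in the comment above. The lw fields
 * are those used in this file; the XTLockWaitRec typedef and the two
 * wait_for_* helpers are illustrative assumptions only (the real callers
 * live in the transaction/table code).
 */
#if 0
        XTLockWaitRec lw;                               /* record type assumed from XTLockWaitPtr */

        lw.lw_thread = ot->ot_thread;
        lw.lw_ot = ot;
        lw.lw_row_id = row_id;

        if (!tab->tab_locks.xt_set_temp_lock(ot, &lw, &ot->ot_thread->st_lock_list))
                return FAILED;
        switch (lw.lw_curr_lock) {
                case XT_NO_LOCK:
                        break;                          /* We got the lock. */
                case XT_TEMP_LOCK:
                        wait_for_lock_signal(&lw);      /* illustrative: wait for release signal */
                        break;
                case XT_PERM_LOCK:
                        wait_for_xact_to_end(lw.lw_xn_id); /* illustrative: wait for transaction end */
                        break;
        }
#endif
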
/*
 * Remove a temporary lock.
 *
 * If updated is set to TRUE this means that the row was updated.
 * This means that any thread waiting on the temporary lock will
 * also have to wait for the transaction to quit before
 * continuing.
 *
 * If the thread were to continue it would just hang again because
 * it will discover that the transaction has updated the row.
 *
 * So the 'updated' flag is an optimisation which prevents the
 * thread from making an unnecessary retry.
 */
void XTRowLocks::xt_remove_temp_lock(XTOpenTablePtr ot, xtBool updated)
{
        xtRowID         row_id;
        XTLockGroupPtr  group;
        XTLockItemPtr   item;
        size_t          index;
        xtBool          lock_granted = FALSE;
        xtThreadID      locking_thread_id = 0;

        if (!(row_id = ot->ot_temp_row_lock))
                return;

        group = &rl_groups[row_id % XT_ROW_LOCK_GROUP_COUNT];
        xt_spinlock_lock(&group->lg_lock);
#ifdef CHECK_ROWLOCK_GROUP_CONSISTENCY
        check_rowlock_group(group);
#endif

#ifdef XT_TRACE_LOCKS
        xt_ttracef(xt_get_self(), "remove temp lock %d\n", (int) row_id);
#endif
        item = (XTLockItemPtr) xt_bsearch(NULL, &row_id, group->lg_list, group->lg_list_in_use, sizeof(XTLockItemRec), &index, NULL, xlock_cmp_row_ids);
        if (item) {
                /* Item exists. */
                if (item->li_thread_id == ot->ot_thread->t_id &&
                        item->li_count == XT_TEMP_LOCK_BYTES) {
                        XTLockWaitPtr   lw;

                        /* First check if there is some thread waiting to take over this lock: */
                        lw = group->lg_wait_queue;
                        while (lw) {
                                if (lw->lw_row_id == row_id) {
                                        lock_granted = TRUE;
                                        break;
                                }
                                lw = lw->lw_next;
                        }

                        if (lock_granted) {
                                /* Grant the lock just released... */
                                XTLockWaitPtr   lw_next, lw_prev;
                                xtXactID        locking_xact_id;

                                /* Store this info, lw will soon be untouchable! */
                                lw_next = lw->lw_next;
                                lw_prev = lw->lw_prev;
                                locking_xact_id = lw->lw_thread->st_xact_data->xd_start_xn_id;
                                locking_thread_id = lw->lw_thread->t_id;

                                /* The lock has moved from one thread to the next.
                                 * Change the thread holding this lock:
                                 */
                                item->li_thread_id = locking_thread_id;

                                /* Remove from the wait queue: */
                                if (lw_next)
                                        lw_next->lw_prev = lw_prev;
                                if (lw_prev)
                                        lw_prev->lw_next = lw_next;
                                if (group->lg_wait_queue == lw)
                                        group->lg_wait_queue = lw_next;
                                if (group->lg_wait_queue_end == lw)
                                        group->lg_wait_queue_end = lw_prev;

                                /* If the thread that released the lock updated the
                                 * row then we will have to wait for the transaction
                                 * to terminate:
                                 */
                                if (updated) {
                                        lw->lw_row_updated = TRUE;
                                        lw->lw_updating_xn_id = ot->ot_thread->st_xact_data->xd_start_xn_id;
                                }

                                /* The thread has the lock now: */
                                lw->lw_ot->ot_temp_row_lock = row_id;
                                lw->lw_curr_lock = XT_NO_LOCK;

                                /* Everyone after this that is waiting for the same lock is
                                 * now waiting for a different transaction:
                                 */
                                lw = lw_next;
                                while (lw) {
                                        if (lw->lw_row_id == row_id) {
                                                lw->lw_xn_id = locking_xact_id;
                                                lw->lw_curr_lock = XT_TEMP_LOCK;
                                        }
                                        lw = lw->lw_next;
                                }
                        }
                        else {
                                /* Remove the lock: */
                                XT_MEMMOVE(group->lg_list, &group->lg_list[index],
                                        &group->lg_list[index+1], (group->lg_list_in_use - index - 1) * sizeof(XTLockItemRec));
                                group->lg_list_in_use--;
                        }
                }
        }
#ifdef CHECK_ROWLOCK_GROUP_CONSISTENCY
        check_rowlock_group(group);
#endif
        xt_spinlock_unlock(&group->lg_lock);

        ot->ot_temp_row_lock = 0;
        if (lock_granted)
                xt_wakeup_thread(locking_thread_id, ot->ot_thread);
}
 
xtBool XTRowLocks::xt_make_lock_permanent(XTOpenTablePtr ot, XTRowLockListPtr lock_list)
{
        xtRowID         row_id;
        XTLockGroupPtr  group;
        XTLockItemPtr   item;
        size_t          index;

        if (!(row_id = ot->ot_temp_row_lock))
                return OK;

#ifdef XT_TRACE_LOCKS
        xt_ttracef(xt_get_self(), "make lock perm %d\n", (int) ot->ot_temp_row_lock);
#endif

        /* Add to the lock list: */
        XTPermRowLockPtr locks = (XTPermRowLockPtr) lock_list->bl_data;
        for (unsigned i=0; i<lock_list->bl_count; i++) {
#ifdef XT_USE_TABLE_REF
                if (locks->pr_table == ot->ot_table) {
#else
                if (locks->pr_tab_id == ot->ot_table->tab_id) {
#endif
                        locks->pr_group[row_id % XT_ROW_LOCK_GROUP_COUNT] = 1;
                        goto done;
                }
                locks++;
        }

        /* Add a new entry to the lock list: */
        {
                XTPermRowLockRec perm_lock;

#ifdef XT_USE_TABLE_REF
                perm_lock.pr_table = ot->ot_table;
                xt_heap_reference_ns(perm_lock.pr_table);
#else
                perm_lock.pr_tab_id = ot->ot_table->tab_id;
#endif
                memset(perm_lock.pr_group, 0, XT_ROW_LOCK_GROUP_COUNT);
                perm_lock.pr_group[row_id % XT_ROW_LOCK_GROUP_COUNT] = 1;
                if (!xt_bl_append(NULL, lock_list, &perm_lock)) {
                        xt_remove_temp_lock(ot, FALSE);
                        return FAILED;
                }
        }

        done:
        group = &rl_groups[row_id % XT_ROW_LOCK_GROUP_COUNT];
        xt_spinlock_lock(&group->lg_lock);

        item = (XTLockItemPtr) xt_bsearch(NULL, &row_id, group->lg_list, group->lg_list_in_use, sizeof(XTLockItemRec), &index, NULL, xlock_cmp_row_ids);
        ASSERT_NS(item);
#ifdef CHECK_ROWLOCK_GROUP_CONSISTENCY
        check_rowlock_group(group);
#endif
        if (item) {
                /* The lock exists (it should!). */
                if (item->li_thread_id == ot->ot_thread->t_id &&
                        item->li_count == XT_TEMP_LOCK_BYTES) {
                        if (index > 0 &&
                                group->lg_list[index-1].li_thread_id == ot->ot_thread->t_id &&
                                group->lg_list[index-1].li_count < XT_TEMP_LOCK_BYTES-2 &&
                                group->lg_list[index-1].li_row_id == row_id - (XT_ROW_LOCK_GROUP_COUNT * group->lg_list[index-1].li_count)) {
                                /* Combine with the item on the left: */
                                group->lg_list[index-1].li_count++;
                                if (index + 1 < group->lg_list_in_use &&
                                        group->lg_list[index+1].li_thread_id == ot->ot_thread->t_id &&
                                        group->lg_list[index+1].li_count != XT_TEMP_LOCK_BYTES &&
                                        group->lg_list[index+1].li_row_id == row_id + XT_ROW_LOCK_GROUP_COUNT) {
                                        /* And combine with the item on the right: */
                                        u_int left = group->lg_list[index-1].li_count + group->lg_list[index+1].li_count;
                                        u_int right;

                                        if (left > XT_TEMP_LOCK_BYTES-1) {
                                                right = left - (XT_TEMP_LOCK_BYTES-1);
                                                left = XT_TEMP_LOCK_BYTES-1;
                                        }
                                        else
                                                right = 0;

                                        group->lg_list[index-1].li_count = left;
                                        if (right) {
                                                /* There is something left over on the right: */
                                                group->lg_list[index+1].li_count = right;
                                                group->lg_list[index+1].li_row_id = group->lg_list[index-1].li_row_id + left * XT_ROW_LOCK_GROUP_COUNT;
                                                XT_MEMMOVE(group->lg_list, &group->lg_list[index],
                                                        &group->lg_list[index+1], (group->lg_list_in_use - index - 1) * sizeof(XTLockItemRec));
                                                group->lg_list_in_use--;
                                        }
                                        else {
                                                XT_MEMMOVE(group->lg_list, &group->lg_list[index],
                                                        &group->lg_list[index+2], (group->lg_list_in_use - index - 2) * sizeof(XTLockItemRec));
                                                group->lg_list_in_use -= 2;
                                        }
                                }
                                else {
                                        XT_MEMMOVE(group->lg_list, &group->lg_list[index],
                                                &group->lg_list[index+1], (group->lg_list_in_use - index - 1) * sizeof(XTLockItemRec));
                                        group->lg_list_in_use--;
                                }
                        }
                        else if (index + 1 < group->lg_list_in_use &&
                                        group->lg_list[index+1].li_thread_id == ot->ot_thread->t_id &&
                                        group->lg_list[index+1].li_count < XT_TEMP_LOCK_BYTES-2 &&
                                        group->lg_list[index+1].li_row_id == row_id + XT_ROW_LOCK_GROUP_COUNT) {
                                /* Combine with the item on the right: */
                                group->lg_list[index+1].li_count++;
                                group->lg_list[index+1].li_row_id = row_id;
                                XT_MEMMOVE(group->lg_list, &group->lg_list[index],
                                        &group->lg_list[index+1], (group->lg_list_in_use - index - 1) * sizeof(XTLockItemRec));
                                group->lg_list_in_use--;
                        }
                        else
                                group->lg_list[index].li_count = 1;
                }
        }
#ifdef CHECK_ROWLOCK_GROUP_CONSISTENCY
        check_rowlock_group(group);
#endif
        xt_spinlock_unlock(&group->lg_lock);

        ot->ot_temp_row_lock = 0;
        return OK;
}
 
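/* Editor's worked example (not part of the original source): assuming
 * XT_ROW_LOCK_GROUP_COUNT is 8, suppose this thread already holds the
 * permanent item {li_row_id=3, li_count=2}, covering rows 3 and 11, and
 * makes a temporary lock on row 19 permanent. Since 19 == 3 + 2*8, the
 * temporary item merges into the left neighbour, which becomes
 * {li_row_id=3, li_count=3} (rows 3, 11 and 19), and the list shrinks by
 * one entry. If an adjacent item of the same thread starting at row 27
 * also exists, the right-hand merge above combines all three, splitting
 * the run only if the combined count would exceed XT_TEMP_LOCK_BYTES-1.
 */
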
xtBool xt_init_row_locks(XTRowLocksPtr rl)
{
        for (int i=0; i<XT_ROW_LOCK_GROUP_COUNT; i++) {
                xt_spinlock_init_with_autoname(NULL, &rl->rl_groups[i].lg_lock);
                rl->rl_groups[i].lg_wait_queue = NULL;
                rl->rl_groups[i].lg_wait_queue_end = NULL;
                rl->rl_groups[i].lg_list_size = 0;
                rl->rl_groups[i].lg_list_in_use = 0;
                rl->rl_groups[i].lg_list = NULL;
        }
        return OK;
}

void xt_exit_row_locks(XTRowLocksPtr rl)
{
        for (int i=0; i<XT_ROW_LOCK_GROUP_COUNT; i++) {
                xt_spinlock_free(NULL, &rl->rl_groups[i].lg_lock);
                rl->rl_groups[i].lg_wait_queue = NULL;
                rl->rl_groups[i].lg_wait_queue_end = NULL;
                rl->rl_groups[i].lg_list_size = 0;
                rl->rl_groups[i].lg_list_in_use = 0;
                if (rl->rl_groups[i].lg_list) {
                        xt_free_ns(rl->rl_groups[i].lg_list);
                        rl->rl_groups[i].lg_list = NULL;
                }
        }
}
 
/*
 * -----------------------------------------------------------------------
 * ROW LOCKS, HASH BASED
 */
#else // XT_USE_LIST_BASED_ROW_LOCKS

void XTRowLockList::old_xt_remove_all_locks(struct XTDatabase *db, xtThreadID thd_id)
{
#ifdef XT_TRACE_LOCKS
        xt_ttracef(xt_get_self(), "remove all locks\n");
#endif
        if (!bl_count)
                return;

        int                     pgroup;
        xtTableID               ptab_id;
        XTPermRowLockPtr        plock;
        XTOpenTablePtr          pot = NULL;

        plock = (XTPermRowLockPtr) &bl_data[bl_count * bl_item_size];
        for (u_int i=0; i<bl_count; i++) {
                plock--;
                pgroup = plock->pr_group;
                ptab_id = plock->pr_tab_id;
                if (pot) {
                        if (pot->ot_table->tab_id == ptab_id)
                                goto remove_lock;
                        xt_db_return_table_to_pool_ns(pot);
                        pot = NULL;
                }

                if (!xt_db_open_pool_table_ns(&pot, db, ptab_id)) {
                        /* Should not happen, but just in case, we just don't
                         * remove the lock. We will probably end up with a deadlock
                         * somewhere.
                         */
                        xt_log_and_clear_exception_ns();
                        goto skip_remove_lock;
                }
                if (!pot)
                        /* Can happen if the table has been dropped: */
                        goto skip_remove_lock;

                remove_lock:
#ifdef XT_TRACE_LOCKS
                xt_ttracef(xt_get_self(), "remove lock group=%d\n", pgroup);
#endif
                pot->ot_table->tab_locks.tab_row_locks[pgroup] = NULL;
                pot->ot_table->tab_locks.tab_lock_perm[pgroup] = 0;
                skip_remove_lock:;
        }
        bl_count = 0;

        if (pot)
                xt_db_return_table_to_pool_ns(pot);
}
 
/* Try to lock a row.
 * This function returns:
 * XT_NO_LOCK on success.
 * XT_TEMP_LOCK if there is a temporary lock on the row.
 * XT_PERM_LOCK if there is a permanent lock on the row.
 *
 * If there is a lock on this row, the transaction ID of the
 * locker is also returned.
 *
 * The caller must wait if the row is locked. If the lock is
 * permanent, then the caller must wait for the transaction to
 * terminate. If the lock is temporary, then the caller must
 * wait for the transaction to signal that the lock has been
 * released.
 */
int XTRowLocks::old_xt_set_temp_lock(XTOpenTablePtr ot, xtRowID row, xtXactID *xn_id, XTRowLockListPtr lock_list)
{
        int             group;
        XTXactDataPtr   xact, my_xact;

        if (ot->ot_temp_row_lock) {
                /* Check whether we already have this temp lock: */
                if (ot->ot_temp_row_lock == row)
                        return XT_NO_LOCK;

                xt_make_lock_permanent(ot, lock_list);
        }

        my_xact = ot->ot_thread->st_xact_data;
        group = row % XT_ROW_LOCK_COUNT;
        if ((xact = tab_row_locks[group])) {
                if (xact == my_xact)
                        return XT_NO_LOCK;
                *xn_id = xact->xd_start_xn_id;
                return tab_lock_perm[group] ? XT_PERM_LOCK : XT_TEMP_LOCK;
        }

        tab_row_locks[row % XT_ROW_LOCK_COUNT] = my_xact;

#ifdef XT_TRACE_LOCKS
        xt_ttracef(xt_get_self(), "set temp lock %d group=%d for %s\n", (int) row, (int) row % XT_ROW_LOCK_COUNT, ot->ot_thread->t_name);
#endif
        ot->ot_temp_row_lock = row;
        return XT_NO_LOCK;
}
 
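/* Editor's worked example (not part of the original source): in the
 * hash-based scheme a row effectively locks its whole hash group.
 * Assuming XT_ROW_LOCK_COUNT is 1024, row 5 and row 1029 both map to
 * group 5 (5 % 1024 == 1029 % 1024 == 5), so a lock on row 5 also blocks
 * a lock attempt on row 1029 until the first transaction releases the
 * group. The list-based scheme above was introduced to lock rows, not
 * whole groups.
 */
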
/* Just check if there is a lock on the row.
 * This function returns:
 * XT_NO_LOCK if there is no lock.
 * XT_TEMP_LOCK if there is a temporary lock on the row.
 * XT_PERM_LOCK if the lock on the row is a permanent lock.
 */
int XTRowLocks::old_xt_is_locked(struct XTOpenTable *ot, xtRowID row, xtXactID *xn_id)
{
        int             group;
        XTXactDataPtr   xact;

        group = row % XT_ROW_LOCK_COUNT;
        if ((xact = tab_row_locks[group])) {
                if (xact == ot->ot_thread->st_xact_data)
                        return XT_NO_LOCK;
                *xn_id = xact->xd_start_xn_id;
                if (tab_lock_perm[group])
                        return XT_PERM_LOCK;
                return XT_TEMP_LOCK;
        }
        return XT_NO_LOCK;
}
 
void XTRowLocks::old_xt_remove_temp_lock(XTOpenTablePtr ot)
{
        int             group;
        XTXactDataPtr   xact, my_xact;

        if (!ot->ot_temp_row_lock)
                return;

        my_xact = ot->ot_thread->st_xact_data;
        group = ot->ot_temp_row_lock % XT_ROW_LOCK_COUNT;
#ifdef XT_TRACE_LOCKS
        xt_ttracef(xt_get_self(), "remove temp lock %d group=%d\n", (int) ot->ot_temp_row_lock, (int) ot->ot_temp_row_lock % XT_ROW_LOCK_COUNT);
#endif
        ot->ot_temp_row_lock = 0;
        if ((xact = tab_row_locks[group])) {
                if (xact == my_xact)
                        tab_row_locks[group] = NULL;
        }

        if (ot->ot_table->tab_db->db_xn_wait_count)
                xt_xn_wakeup_transactions(ot->ot_table->tab_db, ot->ot_thread);
}
 
xtBool XTRowLocks::old_xt_make_lock_permanent(XTOpenTablePtr ot, XTRowLockListPtr lock_list)
{
        int group;

        if (!ot->ot_temp_row_lock)
                return OK;

#ifdef XT_TRACE_LOCKS
        xt_ttracef(xt_get_self(), "make lock perm %d group=%d\n", (int) ot->ot_temp_row_lock, (int) ot->ot_temp_row_lock % XT_ROW_LOCK_COUNT);
#endif
        /* Check if the lock is already permanent: */
        group = ot->ot_temp_row_lock % XT_ROW_LOCK_COUNT;
        if (!tab_lock_perm[group]) {
                XTPermRowLockRec plock;

                plock.pr_tab_id = ot->ot_table->tab_id;
                plock.pr_group = group;
                if (!xt_bl_append(NULL, lock_list, &plock)) {
                        xt_remove_temp_lock(ot);
                        return FAILED;
                }
                tab_lock_perm[group] = 1;
        }

        ot->ot_temp_row_lock = 0;
        return OK;
}
 
/* Release this lock, and all locks gained after this lock
 * on this table.
 *
 * The locks are only released temporarily. They will be regained
 * below using regain locks.
 *
 * Returns:
 * XT_NO_LOCK if no lock is released.
 * XT_PERM_LOCK if a lock is released.
 *
 * Note that only permanent locks can be released in this way.
 * So if the thread has a temporary lock, it will first be made
 * permanent.
 *
 * {RELEASING-LOCKS}
 * The idea of releasing locks comes from the fact that each
 * lock locks a group of records.
 * So if T1 has a lock (e.g. when doing SELECT FOR UPDATE),
 * and then encounters an updated record x
 * from T2, and it must wait for T2, it first releases the
 * lock, just in case T2 tries to gain a lock on another
 * record y in the same group, which would cause it to
 * wait on T1.
 *
 * However, there are several problems with releasing
 * locks.
 * - It can cause a "live-lock", where another transaction
 * keeps getting in before.
 * - It may not solve the problem in all cases because
 * the SELECT FOR UPDATE has locked other record groups
 * before it encountered record x.
 * - Further problems occur when locks are granted by
 * callback:
 * T1 waits for T2, because it has a lock on record x
 * T2 releases the lock because it must wait for T3
 * T1 is granted the lock (but does not know about this yet)
 * T2 tries to regain the lock (after T3 quits) and
 * must wait for T1 - DEADLOCK
 *
 * In general, it does not make sense to release locks
 * when they can be granted again by a callback.
 *
 * TODO: 2 possible solutions:
 * - Do not lock groups, lock rows.
 *   UPDATE INTENTION ROW LOCK
 * - Use multiple lock types:
 *   UPDATE INTENTION LOCK (required first)
 *   SHARED UPDATE LOCK (used by INSERT or DELETE)
 *   EXCLUSIVE UPDATE LOCK (used by SELECT FOR UPDATE)
 *
 * Temporary solution: do not release any locks.
int XTRowLocks::xt_release_locks(struct XTOpenTable *ot, xtRowID row, XTRowLockListPtr lock_list)
 */

/*
 * Regain a lock previously held. This function regains locks
 * released by xt_release_locks().
 *
 * It will return lock_type and xn_id if the row is locked, and therefore
 * regain cannot continue. In this case, the caller must wait.
 * It returns XT_NO_LOCK if there are no more locks to be regained.
 *
 * Locks are always regained in the order in which they were originally
 * taken.
xtBool XTRowLocks::xt_regain_locks(struct XTOpenTable *ot, int *lock_type, xtXactID *xn_id, XTRowLockListPtr lock_list)
 */

xtBool old_xt_init_row_locks(XTRowLocksPtr rl)
{
        memset(rl->tab_lock_perm, 0, XT_ROW_LOCK_COUNT);
        memset(rl->tab_row_locks, 0, XT_ROW_LOCK_COUNT * sizeof(XTXactDataPtr));
        return OK;
}

void old_xt_exit_row_locks(XTRowLocksPtr XT_UNUSED(rl))
{
}

#endif // XT_USE_LIST_BASED_ROW_LOCKS
 
xtPublic xtBool xt_init_row_lock_list(XTRowLockListPtr lock_list)
{
        lock_list->bl_item_size = sizeof(XTPermRowLockRec);
        lock_list->bl_size = 0;
        lock_list->bl_count = 0;
        lock_list->bl_data = NULL;
        return OK;
}

xtPublic void xt_exit_row_lock_list(XTRowLockListPtr lock_list)
{
        xt_bl_set_size(NULL, lock_list, 0);
}
 
/*
 * -----------------------------------------------------------------------
 * SPIN LOCK
 */

#ifdef XT_THREAD_LOCK_INFO
xtPublic void xt_spinlock_init(XTThreadPtr self, XTSpinLockPtr spl, const char *n)
#else
xtPublic void xt_spinlock_init(XTThreadPtr self, XTSpinLockPtr spl)
#endif
{
        (void) self;
        spl->spl_lock = 0;
#ifdef XT_NO_ATOMICS
        xt_init_mutex_with_autoname(self, &spl->spl_mutex);
#endif
#ifdef DEBUG
        spl->spl_locker = 0;
#endif
#ifdef XT_THREAD_LOCK_INFO
        spl->spl_name = n;
        xt_thread_lock_info_init(&spl->spl_lock_info, spl);
#endif
}

xtPublic void xt_spinlock_free(XTThreadPtr XT_UNUSED(self), XTSpinLockPtr spl)
{
        (void) spl;
#ifdef XT_NO_ATOMICS
        xt_free_mutex(&spl->spl_mutex);
#endif
#ifdef XT_THREAD_LOCK_INFO
        xt_thread_lock_info_free(&spl->spl_lock_info);
#endif
}

xtPublic xtBool xt_spinlock_spin(XTSpinLockPtr spl)
{
        volatile xtWord4        *lck = &spl->spl_lock;

        for (;;) {
                for (int i=0; i<10; i++) {
                        /* Check the lock variable: */
                        if (!*lck) {
                                /* Try to get the lock: */
                                if (!xt_spinlock_set(spl))
                                        goto done_ok;
                        }
                }

                /* Go to "sleep": */
                xt_critical_wait();
        }

        done_ok:
        return OK;
}

#ifdef DEBUG
xtPublic void xt_spinlock_set_thread(XTSpinLockPtr spl)
{
        spl->spl_locker = xt_get_self();
}
#endif
 
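/* Editor's sketch (not part of the original source): typical use of the
 * spin lock, as seen throughout this file. xt_spinlock_lock() tries the
 * atomic test-and-set first and falls back to xt_spinlock_spin() above,
 * which spins briefly and then yields via xt_critical_wait(), so the
 * lock is only suitable for very short critical sections. XTSpinLockRec
 * as the record type behind XTSpinLockPtr is assumed.
 */
#if 0
        XTSpinLockRec lock;

        xt_spinlock_init_with_autoname(NULL, &lock);
        xt_spinlock_lock(&lock);
        /* ... short critical section, no blocking calls ... */
        xt_spinlock_unlock(&lock);
        xt_spinlock_free(NULL, &lock);
#endif
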
/*
 * -----------------------------------------------------------------------
 * READ/WRITE SPIN LOCK
 *
 * A very fast read/write lock based on atomic operations.
 */

#ifdef XT_THREAD_LOCK_INFO
xtPublic void xt_spinxslock_init(struct XTThread *XT_UNUSED(self), XTSpinXSLockPtr sxs, const char *name)
#else
xtPublic void xt_spinxslock_init(struct XTThread *XT_UNUSED(self), XTSpinXSLockPtr sxs)
#endif
{
        sxs->sxs_xlocked = 0;
        sxs->sxs_xwaiter = 0;
        sxs->sxs_rlock_count = 0;
        sxs->sxs_wait_count = 0;
#ifdef DEBUG
        sxs->sxs_locker = 0;
#endif
#ifdef XT_THREAD_LOCK_INFO
        sxs->sxs_name = name;
        xt_thread_lock_info_init(&sxs->sxs_lock_info, sxs);
#endif
}

xtPublic void xt_spinxslock_free(struct XTThread *XT_UNUSED(self), XTSpinXSLockPtr sxs)
{
#ifdef XT_THREAD_LOCK_INFO
        xt_thread_lock_info_free(&sxs->sxs_lock_info);
#else
        (void) sxs;
#endif
}

xtPublic xtBool xt_spinxslock_xlock(XTSpinXSLockPtr sxs, xtBool try_lock, xtThreadID XT_NDEBUG_UNUSED(thd_id))
{
        register xtWord2 set;

        /* Wait for the exclusive locker: */
        for (;;) {
                set = xt_atomic_tas2(&sxs->sxs_xlocked, 1);
                if (!set)
                        break;
                if (try_lock)
                        return FALSE;
                xt_yield();
        }

#ifdef DEBUG
        sxs->sxs_locker = thd_id;
#endif

        /* Wait for all the readers to wait! */
        while (sxs->sxs_wait_count < sxs->sxs_rlock_count) {
                sxs->sxs_xwaiter = 1;
                xt_yield();
                /* This should not be required, because there is only one thread
                 * accessing this value. However, the lock fails if this
                 * is not done with an atomic op.
                 *
                 * This is because threads on other processors have the
                 * value in processor cache. So they do not
                 * notice that the value has been set to zero.
                 * They think it is still 1 and march through
                 * the barrier (sxs->sxs_xwaiter < sxs->sxs_xlocked) below.
                 *
                 * In the meantime, this X locker has gone on thinking
                 * all is OK.
                 */
                xt_atomic_tas2(&sxs->sxs_xwaiter, 0);
        }

#ifdef XT_THREAD_LOCK_INFO
        xt_thread_lock_info_add_owner(&sxs->sxs_lock_info);
#endif
        return OK;
}

xtPublic xtBool xt_spinxslock_slock(XTSpinXSLockPtr sxs)
{
        xt_atomic_inc2(&sxs->sxs_rlock_count);

        /* Wait as long as the locker is not waiting: */
        while (sxs->sxs_xwaiter < sxs->sxs_xlocked) {
                xt_atomic_inc2(&sxs->sxs_wait_count);
                while (sxs->sxs_xwaiter < sxs->sxs_xlocked) {
                        xt_yield();
                }
                xt_atomic_dec2(&sxs->sxs_wait_count);
        }

#ifdef XT_THREAD_LOCK_INFO
        xt_thread_lock_info_add_owner(&sxs->sxs_lock_info);
#endif
        return OK;
}

xtPublic xtBool xt_spinxslock_unlock(XTSpinXSLockPtr sxs, xtBool xlocked)
{
        if (xlocked) {
#ifdef DEBUG
                ASSERT_NS(sxs->sxs_locker && sxs->sxs_xlocked);
                sxs->sxs_locker = 0;
#endif
                sxs->sxs_xlocked = 0;
        }
        else {
#ifdef DEBUG
                ASSERT_NS(sxs->sxs_rlock_count > 0);
#endif
                xt_atomic_dec2(&sxs->sxs_rlock_count);
        }

#ifdef XT_THREAD_LOCK_INFO
        xt_thread_lock_info_release_owner(&sxs->sxs_lock_info);
#endif
        return OK;
}
 
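/* Editor's sketch (not part of the original source): how the spin
 * read/write lock calls pair up. Writers take and release the exclusive
 * lock, readers the shared lock; the xlocked flag passed to unlock must
 * match the lock that was taken. XTSpinXSLockRec and the
 * _with_autoname init wrapper (mirroring xt_spinlock_init_with_autoname)
 * are assumptions.
 */
#if 0
        XTSpinXSLockRec sxs;

        xt_spinxslock_init_with_autoname(NULL, &sxs);

        /* Writer: */
        xt_spinxslock_xlock(&sxs, FALSE, thread->t_id); /* FALSE = wait, don't just try */
        /* ... modify the shared structure ... */
        xt_spinxslock_unlock(&sxs, TRUE);               /* TRUE = was exclusively locked */

        /* Reader: */
        xt_spinxslock_slock(&sxs);
        /* ... read the shared structure ... */
        xt_spinxslock_unlock(&sxs, FALSE);

        xt_spinxslock_free(NULL, &sxs);
#endif
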
1211
 
/*
1212
 
 * -----------------------------------------------------------------------
1213
 
 * FAST READ/WRITE LOCK (BASED ON FAST MUTEX)
1214
 
 */
1215
 
 
1216
 
#ifdef XT_THREAD_LOCK_INFO
1217
 
xtPublic void xt_xsmutex_init(struct XTThread *self, XTMutexXSLockPtr xsm, const char *name)
1218
 
#else
1219
 
xtPublic void xt_xsmutex_init(struct XTThread *self, XTMutexXSLockPtr xsm)
1220
 
#endif
1221
 
{
1222
 
        xt_init_mutex_with_autoname(self, &xsm->xsm_lock);
1223
 
        xt_init_cond(self, &xsm->xsm_xcond);
1224
 
        xt_init_cond(self, &xsm->xsm_rcond);
1225
 
        xsm->xsm_xlocker = 0;
1226
 
        xsm->xsm_rlock_count = 0;
1227
 
        xsm->xsm_rwait_count = 0;
1228
 
#ifdef DEBUG
1229
 
        xsm->xsm_locker = 0;
1230
 
#endif
1231
 
#ifdef XT_THREAD_LOCK_INFO
1232
 
        xsm->xsm_name = name;
1233
 
        xt_thread_lock_info_init(&xsm->xsm_lock_info, xsm);
1234
 
#endif
1235
 
}
1236
 
 
1237
 
xtPublic void xt_xsmutex_free(struct XTThread *XT_UNUSED(self), XTMutexXSLockPtr xsm)
1238
 
{
1239
 
        xt_free_mutex(&xsm->xsm_lock);
1240
 
        xt_free_cond(&xsm->xsm_xcond);
1241
 
        xt_free_cond(&xsm->xsm_rcond);
1242
 
#ifdef XT_THREAD_LOCK_INFO
1243
 
        xt_thread_lock_info_free(&xsm->xsm_lock_info);
1244
 
#endif
1245
 
}

xtPublic xtBool xt_xsmutex_xlock(XTMutexXSLockPtr xsm, xtThreadID thd_id)
{
	xt_lock_mutex_ns(&xsm->xsm_lock);

	xt_atomic_inc2(&xsm->xsm_xwait_count);

	/* Wait for exclusive locker: */
	while (xsm->xsm_xlocker) {
		if (!xt_timed_wait_cond_ns(&xsm->xsm_xcond, &xsm->xsm_lock, 10000)) {
			xsm->xsm_xwait_count--;
			xt_unlock_mutex_ns(&xsm->xsm_lock);
			return FAILED;
		}
	}

	/* GOTCHA: You would think this is not necessary...
	 * But it does not always work if a normal assignment is used.
	 * The reason is, I guess, that on multi-processor machines the
	 * assignment is not always immediately visible to other
	 * processors, because they have old versions of this variable
	 * in their caches.
	 *
	 * But this is required, because the locking mechanism is based
	 * on:
	 * Locker: sets xlocker, tests rlock_count
	 * Reader: incs rlock_count, tests xlocker
	 *
	 * The test, in both cases, may not read stale values.
	 * volatile does not help, because this just turns compiler
	 * optimisations off.
	 */
	xt_atomic_set4(&xsm->xsm_xlocker, thd_id);

	/* Wait for all the readers to wait! */
	while (xsm->xsm_rwait_count < xsm->xsm_rlock_count) {
		/* {RACE-WR_MUTEX} Here as well: */
		if (!xt_timed_wait_cond_ns(&xsm->xsm_xcond, &xsm->xsm_lock, 100)) {
			xsm->xsm_xwait_count--;
			xsm->xsm_xlocker = 0;
			xt_unlock_mutex_ns(&xsm->xsm_lock);
			return FAILED;
		}
	}

	xsm->xsm_xwait_count--;
#ifdef XT_THREAD_LOCK_INFO
	xt_thread_lock_info_add_owner(&xsm->xsm_lock_info);
#endif
	return OK;
}
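
/* The handshake the GOTCHA above describes, reduced to its two sides
 * (a sketch of the idea, not code from this file):
 *
 *   Writer (xlock):                    Reader (slock):
 *     atomic set  xsm_xlocker = me       atomic inc  xsm_rlock_count
 *     then test   xsm_rlock_count        then test   xsm_xlocker
 *
 * Each side publishes its own flag before testing the other side's, so
 * at least one of the two is guaranteed to observe the conflict. The
 * atomic operations provide the cross-processor visibility that a plain
 * assignment (or volatile) does not.
 */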

xtPublic xtBool xt_xsmutex_slock(XTMutexXSLockPtr xsm, xtThreadID XT_UNUSED(thd_id))
{
	xt_atomic_inc2(&xsm->xsm_rlock_count);

	/* Check if there could be an X locker: */
	if (xsm->xsm_xlocker) {
		/* I am waiting... */
		xt_lock_mutex_ns(&xsm->xsm_lock);
		xsm->xsm_rwait_count++;
		/* Wake up the xlocker: */
		if (xsm->xsm_xlocker && xsm->xsm_rwait_count == xsm->xsm_rlock_count) {
			if (!xt_broadcast_cond_ns(&xsm->xsm_xcond)) {
				xsm->xsm_rwait_count--;
				xt_unlock_mutex_ns(&xsm->xsm_lock);
				return FAILED;
			}
		}
		while (xsm->xsm_xlocker) {
			if (!xt_timed_wait_cond_ns(&xsm->xsm_rcond, &xsm->xsm_lock, 10000)) {
				xsm->xsm_rwait_count--;
				xt_unlock_mutex_ns(&xsm->xsm_lock);
				return FAILED;
			}
		}
		xsm->xsm_rwait_count--;
		xt_unlock_mutex_ns(&xsm->xsm_lock);
	}

#ifdef XT_THREAD_LOCK_INFO
	xt_thread_lock_info_add_owner(&xsm->xsm_lock_info);
#endif
	return OK;
}

xtPublic xtBool xt_xsmutex_unlock(XTMutexXSLockPtr xsm, xtThreadID thd_id)
{
	if (xsm->xsm_xlocker == thd_id) {
		xsm->xsm_xlocker = 0;
		if (xsm->xsm_rwait_count) {
			if (!xt_broadcast_cond_ns(&xsm->xsm_rcond)) {
				xt_unlock_mutex_ns(&xsm->xsm_lock);
				return FAILED;
			}
		}
		else if (xsm->xsm_xwait_count) {
			/* Wake up any other X or shared lockers: */
			if (!xt_broadcast_cond_ns(&xsm->xsm_xcond)) {
				xt_unlock_mutex_ns(&xsm->xsm_lock);
				return FAILED;
			}
		}
		xt_unlock_mutex_ns(&xsm->xsm_lock);
	}
	else {
		/* Taking the advice from {RACE-WR_MUTEX} I do the decrement
		 * after I have a lock!
		 */
		if (xsm->xsm_xwait_count) {
			xt_lock_mutex_ns(&xsm->xsm_lock);
			xt_atomic_dec2(&xsm->xsm_rlock_count);
			if (xsm->xsm_xwait_count && xsm->xsm_rwait_count == xsm->xsm_rlock_count) {
				/* If the X locker is waiting for me,
				 * then allow him to continue.
				 */
				if (!xt_broadcast_cond_ns(&xsm->xsm_xcond)) {
					xt_unlock_mutex_ns(&xsm->xsm_lock);
					return FAILED;
				}
			}
			xt_unlock_mutex_ns(&xsm->xsm_lock);
		}
		else
			xt_atomic_dec2(&xsm->xsm_rlock_count);
	}

#ifdef XT_THREAD_LOCK_INFO
	xt_thread_lock_info_release_owner(&xsm->xsm_lock_info);
#endif
	return OK;
}
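
/* A minimal usage sketch of the fast x/s mutex, matching the calls the
 * unit tests below make. The function name is illustrative only; note
 * that xt_xsmutex_unlock() tells an X release from an S release by
 * comparing the thread ID against xsm_xlocker.
 */
#if 0
static void example_xsmutex_usage(XTMutexXSLockPtr xsm, XTThreadPtr self)
{
	/* Shared access: */
	xt_xsmutex_slock(xsm, self->t_id);
	/* ... read shared state ... */
	xt_xsmutex_unlock(xsm, self->t_id);

	/* Exclusive access: */
	xt_xsmutex_xlock(xsm, self->t_id);
	/* ... modify shared state ... */
	xt_xsmutex_unlock(xsm, self->t_id);
}
#endif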

/*
 * -----------------------------------------------------------------------
 * RECURSIVE MUTEX (allows lockers to lock again)
 */

#ifdef XT_THREAD_LOCK_INFO
void xt_recursivemutex_init(XTThreadPtr self, XTRecursiveMutexPtr rm, const char *name)
{
	rm->rm_locker = NULL;
	rm->rm_lock_count = 0;
	xt_init_mutex(self, &rm->rm_mutex, name);
}
#else
xtPublic void xt_recursivemutex_init(XTThreadPtr self, XTRecursiveMutexPtr rm)
{
	rm->rm_locker = NULL;
	rm->rm_lock_count = 0;
	xt_init_mutex(self, &rm->rm_mutex);
}
#endif

xtPublic void xt_recursivemutex_free(XTRecursiveMutexPtr rm)
{
	xt_free_mutex(&rm->rm_mutex);
#ifdef XT_THREAD_LOCK_INFO
	xt_thread_lock_info_free(&rm->rm_lock_info);
#endif
}

xtPublic void xt_recursivemutex_lock(XTThreadPtr self, XTRecursiveMutexPtr rm)
{
	if (self != rm->rm_locker) {
		xt_lock_mutex(self, &rm->rm_mutex);
		rm->rm_locker = self;
	}
	rm->rm_lock_count++;
}

xtPublic void xt_recursivemutex_unlock(XTThreadPtr self, XTRecursiveMutexPtr rm)
{
	ASSERT(self == rm->rm_locker);
	ASSERT(rm->rm_lock_count > 0);
	rm->rm_lock_count--;
	if (!rm->rm_lock_count) {
		rm->rm_locker = NULL;
		xt_unlock_mutex(self, &rm->rm_mutex);
	}
}
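
/* A minimal sketch of the point of the recursive mutex: the thread that
 * already holds it may lock it again without deadlocking. The function
 * name is illustrative only.
 */
#if 0
static void example_recursivemutex_usage(XTThreadPtr self, XTRecursiveMutexPtr rm)
{
	xt_recursivemutex_lock(self, rm);	/* rm_lock_count == 1 */
	xt_recursivemutex_lock(self, rm);	/* same thread, no deadlock: count == 2 */
	xt_recursivemutex_unlock(self, rm);	/* count == 1, mutex still held */
	xt_recursivemutex_unlock(self, rm);	/* count == 0, mutex released */
}
#endif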

/*
 * -----------------------------------------------------------------------
 * RECURSIVE R/W LOCK (allows X lockers to lock again)
 */

#ifdef XT_THREAD_LOCK_INFO
void xt_recurrwlock_init(struct XTThread *self, XTRecurRWLockPtr rrw, const char *name)
{
	rrw->rrw_locker = NULL;
	rrw->rrw_lock_count = 0;
	xt_init_rwlock(self, &rrw->rrw_lock, name);
}
#else
void xt_recurrwlock_init(struct XTThread *self, XTRecurRWLockPtr rrw)
{
	rrw->rrw_locker = NULL;
	rrw->rrw_lock_count = 0;
	xt_init_rwlock(self, &rrw->rrw_lock);
}
#endif

void xt_recurrwlock_free(XTRecurRWLockPtr rrw)
{
	xt_free_rwlock(&rrw->rrw_lock);
#ifdef XT_THREAD_LOCK_INFO
	xt_thread_lock_info_free(&rrw->rrw_lock_info);
#endif
}

void xt_recurrwlock_xlock(struct XTThread *self, XTRecurRWLockPtr rrw)
{
	if (self != rrw->rrw_locker) {
		xt_xlock_rwlock(self, &rrw->rrw_lock);
		rrw->rrw_locker = self;
	}
	rrw->rrw_lock_count++;
}

void xt_recurrwlock_slock(struct XTThread *self, XTRecurRWLockPtr rrw)
{
	xt_slock_rwlock(self, &rrw->rrw_lock);
}

void xt_recurrwlock_slock_ns(XTRecurRWLockPtr rrw)
{
	xt_slock_rwlock_ns(&rrw->rrw_lock);
}

void xt_recurrwlock_unxlock(struct XTThread *self, XTRecurRWLockPtr rrw)
{
	ASSERT(self == rrw->rrw_locker);
	ASSERT(rrw->rrw_lock_count > 0);
	rrw->rrw_lock_count--;
	if (!rrw->rrw_lock_count) {
		rrw->rrw_locker = NULL;
		xt_unlock_rwlock(self, &rrw->rrw_lock);
	}
}

void xt_recurrwlock_unslock(struct XTThread *self, XTRecurRWLockPtr rrw)
{
	xt_unlock_rwlock(self, &rrw->rrw_lock);
}

void xt_recurrwlock_unslock_ns(XTRecurRWLockPtr rrw)
{
	xt_unlock_rwlock_ns(&rrw->rrw_lock);
}
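
/* The same idea for the recursive r/w lock: only the exclusive side is
 * recursive (shared locks pass straight through to the underlying
 * rwlock). The function name is illustrative only.
 */
#if 0
static void example_recurrwlock_usage(struct XTThread *self, XTRecurRWLockPtr rrw)
{
	xt_recurrwlock_xlock(self, rrw);	/* rrw_lock_count == 1 */
	xt_recurrwlock_xlock(self, rrw);	/* re-entry by the X locker: count == 2 */
	xt_recurrwlock_unxlock(self, rrw);	/* count == 1, still held */
	xt_recurrwlock_unxlock(self, rrw);	/* count == 0, released */
}
#endif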

/*
 * -----------------------------------------------------------------------
 * UNIT TESTS
 */

#define JOB_MEMCPY			1
#define JOB_SLEEP			2
#define JOB_PRINT			3
#define JOB_INCREMENT		4
#define JOB_SNOOZE			5
#define JOB_DOUBLE_INC		6

#define LOCK_PTHREAD_RW		1
#define LOCK_PTHREAD_MUTEX	2
#define LOCK_XSMUTEX		3
#define LOCK_SPINXSLOCK		4
#define LOCK_SPINLOCK		5

typedef struct XSLockTest {
	u_int				xs_interations;
	int					xs_which_lock;	/* A LOCK_* code, not a boolean. */
	int					xs_which_job;	/* A JOB_* code, not a boolean. */
	xtBool				xs_debug_print;
	xt_rwlock_type		xs_plock;
	xt_mutex_type		xs_mutex;
	XTMutexXSLockRec	xs_fastrwlock;
	XTSpinXSLockRec		xs_spinrwlock;
	XTSpinLockRec		xs_spinlock;
	int					xs_progress;
	xtWord4				xs_inc;
} XSLockTestRec, *XSLockTestPtr;

static void lck_free_thread_data(XTThreadPtr XT_UNUSED(self), void *XT_UNUSED(data))
{
}

static void lck_do_job(XTThreadPtr self, int job, XSLockTestPtr data, xtBool reader)
{
	char b1[2048], b2[2048];

	switch (job) {
		case JOB_MEMCPY:
			memcpy(b1, b2, 2048);
			data->xs_inc++;
			break;
		case JOB_SLEEP:
			xt_sleep_milli_second(1);
			data->xs_inc++;
			break;
		case JOB_PRINT:
			printf("- %s got lock\n", self->t_name);
			xt_sleep_milli_second(10);
			data->xs_inc++;
			break;
		case JOB_INCREMENT:
			data->xs_inc++;
			break;
		case JOB_SNOOZE:
			xt_sleep_milli_second(10);
			data->xs_inc++;
			break;
		case JOB_DOUBLE_INC:
			if (reader) {
				if ((data->xs_inc & 1) != 0)
					printf("Noooo!\n");
			}
			else {
				data->xs_inc++;
				data->xs_inc++;
			}
			break;
	}
}
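
/* Note on JOB_DOUBLE_INC above: it is the mutual-exclusion check of the
 * test suite. Writers always increment xs_inc twice while holding the
 * lock, so the count is even at every point where no writer is active;
 * a reader that ever observes an odd value has caught a half-completed
 * update, i.e. a broken lock.
 */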

#if 0
static void *lck_run_dumper(XTThreadPtr self)
{
	int state = 0;

	while (state != 1) {
		sleep(1);
		if (state == 2) {
			xt_dump_trace();
			state = 0;
		}
	}
	return NULL;
}
#endif

static void *lck_run_reader(XTThreadPtr self)
{
	XSLockTestRec	*data = (XSLockTestRec *) self->t_data;

	if (data->xs_debug_print)
		printf("- %s start\n", self->t_name);
	for (u_int i=0; i<data->xs_interations; i++) {
		if (data->xs_progress && ((i+1) % data->xs_progress) == 0)
			printf("- %s %d\n", self->t_name, i+1);
		if (data->xs_which_lock == LOCK_PTHREAD_RW) {
			xt_slock_rwlock_ns(&data->xs_plock);
			lck_do_job(self, data->xs_which_job, data, TRUE);
			xt_unlock_rwlock_ns(&data->xs_plock);
		}
		else if (data->xs_which_lock == LOCK_SPINXSLOCK) {
			xt_spinxslock_slock(&data->xs_spinrwlock);
			lck_do_job(self, data->xs_which_job, data, TRUE);
			xt_spinxslock_unlock(&data->xs_spinrwlock, FALSE);
		}
		else if (data->xs_which_lock == LOCK_XSMUTEX) {
			xt_xsmutex_slock(&data->xs_fastrwlock, self->t_id);
			lck_do_job(self, data->xs_which_job, data, TRUE);
			xt_xsmutex_unlock(&data->xs_fastrwlock, self->t_id);
		}
		else
			ASSERT(FALSE);
	}
	if (data->xs_debug_print)
		printf("- %s stop\n", self->t_name);
	return NULL;
}

static void *lck_run_writer(XTThreadPtr self)
{
	XSLockTestRec	*data = (XSLockTestRec *) self->t_data;

	if (data->xs_debug_print)
		printf("- %s start\n", self->t_name);
	for (u_int i=0; i<data->xs_interations; i++) {
		if (data->xs_progress && ((i+1) % data->xs_progress) == 0)
			printf("- %s %d\n", self->t_name, i+1);
		if (data->xs_which_lock == LOCK_PTHREAD_RW) {
			xt_xlock_rwlock_ns(&data->xs_plock);
			lck_do_job(self, data->xs_which_job, data, FALSE);
			xt_unlock_rwlock_ns(&data->xs_plock);
		}
		else if (data->xs_which_lock == LOCK_SPINXSLOCK) {
			xt_spinxslock_xlock(&data->xs_spinrwlock, FALSE, self->t_id);
			lck_do_job(self, data->xs_which_job, data, FALSE);
			xt_spinxslock_unlock(&data->xs_spinrwlock, TRUE);
		}
		else if (data->xs_which_lock == LOCK_XSMUTEX) {
			xt_xsmutex_xlock(&data->xs_fastrwlock, self->t_id);
			lck_do_job(self, data->xs_which_job, data, FALSE);
			xt_xsmutex_unlock(&data->xs_fastrwlock, self->t_id);
		}
		else
			ASSERT(FALSE);
	}
	if (data->xs_debug_print)
		printf("- %s stop\n", self->t_name);
	return NULL;
}

static void lck_print_test(XSLockTestRec *data)
{
	switch (data->xs_which_lock) {
		case LOCK_PTHREAD_RW:
			printf("pthread read/write");
			break;
		case LOCK_PTHREAD_MUTEX:
			printf("pthread mutex");
			break;
		case LOCK_SPINLOCK:
			printf("spin mutex");
			break;
		case LOCK_SPINXSLOCK:
			printf("spin read/write lock");
			break;
		case LOCK_XSMUTEX:
			printf("fast x/s mutex");
			break;
	}

	switch (data->xs_which_job) {
		case JOB_MEMCPY:
			printf(" MEMCPY 2K");
			break;
		case JOB_SLEEP:
			printf(" SLEEP 1/1000s");
			break;
		case JOB_PRINT:
			printf(" PRINT DEBUG");
			break;
		case JOB_INCREMENT:
			printf(" INCREMENT");
			break;
		case JOB_SNOOZE:
			printf(" SLEEP 1/100s");
			break;
		case JOB_DOUBLE_INC:
			printf(" DOUBLE INCREMENT");
			break;
	}

	printf(" %d iterations", data->xs_interations);
}

static void *lck_run_mutex_locker(XTThreadPtr self)
{
	XSLockTestRec *data = (XSLockTestRec *) self->t_data;

	if (data->xs_debug_print)
		printf("- %s start\n", self->t_name);
	for (u_int i=0; i<data->xs_interations; i++) {
		if (data->xs_progress && ((i+1) % data->xs_progress) == 0)
			printf("- %s %d\n", self->t_name, i+1);
		if (data->xs_which_lock == LOCK_PTHREAD_MUTEX) {
			xt_lock_mutex_ns(&data->xs_mutex);
			lck_do_job(self, data->xs_which_job, data, FALSE);
			xt_unlock_mutex_ns(&data->xs_mutex);
		}
		else if (data->xs_which_lock == LOCK_SPINLOCK) {
			xt_spinlock_lock(&data->xs_spinlock);
			lck_do_job(self, data->xs_which_job, data, FALSE);
			xt_spinlock_unlock(&data->xs_spinlock);
		}
		else
			ASSERT(FALSE);
	}
	if (data->xs_debug_print)
		printf("- %s stop\n", self->t_name);
	return NULL;
}

typedef struct LockThread {
	xtThreadID		id;
	XTThreadPtr		ptr;
} LockThreadRec, *LockThreadPtr;

static void lck_reader_writer_test(XTThreadPtr self, XSLockTestRec *data, int reader_cnt, int writer_cnt)
{
	xtWord8			start;
	LockThreadPtr	threads;
	int				thread_cnt = reader_cnt + writer_cnt;
	char			buffer[40];

	//XTThreadPtr dumper = xt_create_daemon(self, "DUMPER");
	//xt_run_thread(self, dumper, lck_run_dumper);

	printf("READ/WRITE TEST: ");
	lck_print_test(data);
	printf(", %d readers, %d writers\n", reader_cnt, writer_cnt);
	threads = (LockThreadPtr) xt_malloc(self, thread_cnt * sizeof(LockThreadRec));

	for (int i=0; i<thread_cnt; i++) {
		sprintf(buffer, "%s%d", i < reader_cnt ? "READER-" : "WRITER-", i+1);
		threads[i].ptr = xt_create_daemon(self, buffer);
		threads[i].id = threads[i].ptr->t_id;
		xt_set_thread_data(threads[i].ptr, data, lck_free_thread_data);
	}

	start = xt_trace_clock();
	for (int i=0; i<reader_cnt; i++)
		xt_run_thread(self, threads[i].ptr, lck_run_reader);
	for (int i=reader_cnt; i<thread_cnt; i++)
		xt_run_thread(self, threads[i].ptr, lck_run_writer);

	for (int i=0; i<thread_cnt; i++)
		xt_wait_for_thread_to_exit(threads[i].id, TRUE);
	printf("----- %d reader, %d writer time=%s\n", reader_cnt, writer_cnt, xt_trace_clock_diff(buffer, start));

	xt_free(self, threads);
	printf("TEST RESULT = %d\n", data->xs_inc);

	//xt_wait_for_thread_to_exit(dumper, TRUE);
}

static void lck_mutex_lock_test(XTThreadPtr self, XSLockTestRec *data, int thread_cnt)
{
	xtWord8			start;
	LockThreadPtr	threads;
	char			buffer[40];

	printf("LOCK MUTEX TEST: ");
	lck_print_test(data);
	printf(", %d threads\n", thread_cnt);
	threads = (LockThreadPtr) xt_malloc(self, thread_cnt * sizeof(LockThreadRec));

	for (int i=0; i<thread_cnt; i++) {
		sprintf(buffer, "THREAD%d", i+1);
		threads[i].ptr = xt_create_daemon(self, buffer);
		threads[i].id = threads[i].ptr->t_id;
		xt_set_thread_data(threads[i].ptr, data, lck_free_thread_data);
	}

	start = xt_trace_clock();
	for (int i=0; i<thread_cnt; i++)
		xt_run_thread(self, threads[i].ptr, lck_run_mutex_locker);

	for (int i=0; i<thread_cnt; i++)
		xt_wait_for_thread_to_exit(threads[i].id, TRUE);
	printf("----- %d threads time=%s\n", thread_cnt, xt_trace_clock_diff(buffer, start));

	xt_free(self, threads);
	printf("TEST RESULT = %d\n", data->xs_inc);
}
1789
 
 
1790
 
xtPublic void xt_unit_test_read_write_locks(XTThreadPtr self)
1791
 
{
1792
 
        XSLockTestRec   data;
1793
 
 
1794
 
        memset(&data, 0, sizeof(data));
1795
 
 
1796
 
        printf("TEST: xt_unit_test_read_write_locks\n");
1797
 
        printf("size of XTMutexXSLockRec = %d\n", (int) sizeof(XTMutexXSLockRec));
1798
 
        printf("size of pthread_cond_t = %d\n", (int) sizeof(pthread_cond_t));
1799
 
        printf("size of pthread_mutex_t = %d\n", (int) sizeof(pthread_mutex_t));
1800
 
        xt_init_rwlock_with_autoname(self, &data.xs_plock);
1801
 
        xt_spinxslock_init_with_autoname(self, &data.xs_spinrwlock);
1802
 
        xt_xsmutex_init_with_autoname(self, &data.xs_fastrwlock);
1803
 
 
1804
 
        /**
1805
 
        data.xs_interations = 10;
1806
 
        data.xs_which_lock = ; // LOCK_PTHREAD_RW, LOCK_SPINXSLOCK, LOCK_XSMUTEX
1807
 
        data.xs_which_job = JOB_PRINT;
1808
 
        data.xs_debug_print = TRUE;
1809
 
        data.xs_progress = 0;
1810
 
        lck_reader_writer_test(self, &data, 4, 0);
1811
 
        lck_reader_writer_test(self, &data, 0, 2);
1812
 
        lck_reader_writer_test(self, &data, 1, 1);
1813
 
        lck_reader_writer_test(self, &data, 4, 2);
1814
 
        **/
1815
 
 
1816
 
        /**
1817
 
        data.xs_interations = 4000;
1818
 
        data.xs_which_lock = ; // LOCK_PTHREAD_RW, LOCK_SPINXSLOCK, LOCK_XSMUTEX
1819
 
        data.xs_which_job = JOB_SLEEP;
1820
 
        data.xs_debug_print = TRUE;
1821
 
        data.xs_progress = 200;
1822
 
        lck_reader_writer_test(self, &data, 4, 0);
1823
 
        lck_reader_writer_test(self, &data, 0, 2);
1824
 
        lck_reader_writer_test(self, &data, 1, 1);
1825
 
        lck_reader_writer_test(self, &data, 4, 2);
1826
 
        **/
1827
 
 
1828
 
        // LOCK_PTHREAD_RW, LOCK_SPINXSLOCK, LOCK_XSMUTEX
1829
 
        /**/
1830
 
        data.xs_interations = 100000;
1831
 
        data.xs_which_lock = LOCK_XSMUTEX;
1832
 
        data.xs_which_job = JOB_DOUBLE_INC; // JOB_INCREMENT, JOB_DOUBLE_INC
1833
 
        data.xs_debug_print = FALSE;
1834
 
        data.xs_progress = 0;
1835
 
        lck_reader_writer_test(self, &data, 10, 0);
1836
 
        data.xs_which_lock = LOCK_XSMUTEX;
1837
 
        lck_reader_writer_test(self, &data, 10, 0);
1838
 
        //lck_reader_writer_test(self, &data, 0, 5);
1839
 
        //lck_reader_writer_test(self, &data, 10, 0);
1840
 
        //lck_reader_writer_test(self, &data, 10, 5);
1841
 
        /**/
1842
 
 
1843
 
        /**/
1844
 
        data.xs_interations = 10000;
1845
 
        data.xs_which_lock = LOCK_XSMUTEX;
1846
 
        data.xs_which_job = JOB_MEMCPY;
1847
 
        data.xs_debug_print = FALSE;
1848
 
        data.xs_progress = 0;
1849
 
        lck_reader_writer_test(self, &data, 10, 0);
1850
 
        data.xs_which_lock = LOCK_XSMUTEX;
1851
 
        lck_reader_writer_test(self, &data, 10, 0);
1852
 
        //lck_reader_writer_test(self, &data, 0, 5);
1853
 
        //lck_reader_writer_test(self, &data, 10, 0);
1854
 
        //lck_reader_writer_test(self, &data, 10, 5);
1855
 
        /**/
1856
 
 
1857
 
        /**/
1858
 
        data.xs_interations = 1000;
1859
 
        data.xs_which_lock = LOCK_XSMUTEX;
1860
 
        data.xs_which_job = JOB_SLEEP; // JOB_SLEEP, JOB_SNOOZE
1861
 
        data.xs_debug_print = FALSE;
1862
 
        data.xs_progress = 0;
1863
 
        lck_reader_writer_test(self, &data, 10, 0);
1864
 
        data.xs_which_lock = LOCK_XSMUTEX;
1865
 
        lck_reader_writer_test(self, &data, 10, 0);
1866
 
        /**/
1867
 
 
1868
 
        xt_free_rwlock(&data.xs_plock);
1869
 
        xt_spinxslock_free(self, &data.xs_spinrwlock);
1870
 
        xt_xsmutex_free(self, &data.xs_fastrwlock);
1871
 
}

xtPublic void xt_unit_test_mutex_locks(XTThreadPtr self)
{
	XSLockTestRec	data;

	memset(&data, 0, sizeof(data));

	printf("TEST: xt_unit_test_mutex_locks\n");
	xt_spinlock_init_with_autoname(self, &data.xs_spinlock);
	xt_init_mutex_with_autoname(self, &data.xs_mutex);

	/**/
	data.xs_interations = 10;
	data.xs_which_lock = LOCK_SPINLOCK; // LOCK_SPINLOCK, LOCK_PTHREAD_MUTEX
	data.xs_which_job = JOB_PRINT;
	data.xs_debug_print = TRUE;
	data.xs_progress = 0;
	data.xs_inc = 0;
	lck_mutex_lock_test(self, &data, 2);
	/**/

	/**/
	data.xs_interations = 100000;
	data.xs_which_lock = LOCK_SPINLOCK; // LOCK_SPINLOCK, LOCK_PTHREAD_MUTEX
	data.xs_which_job = JOB_INCREMENT;
	data.xs_debug_print = FALSE;
	data.xs_progress = 0;
	data.xs_inc = 0;
	lck_mutex_lock_test(self, &data, 10);
	/**/

	/**/
	data.xs_interations = 10000;
	data.xs_which_lock = LOCK_SPINLOCK; // LOCK_SPINLOCK, LOCK_PTHREAD_MUTEX
	data.xs_which_job = JOB_MEMCPY;
	data.xs_debug_print = FALSE;
	data.xs_progress = 0;
	data.xs_inc = 0;
	lck_mutex_lock_test(self, &data, 10);
	/**/

	/**/
	data.xs_interations = 1000;
	data.xs_which_lock = LOCK_SPINLOCK; // LOCK_SPINLOCK, LOCK_PTHREAD_MUTEX
	data.xs_which_job = JOB_SLEEP;
	data.xs_debug_print = FALSE;
	data.xs_progress = 0;
	data.xs_inc = 0;
	lck_mutex_lock_test(self, &data, 10);
	/**/

	/**/
	data.xs_interations = 100;
	data.xs_which_lock = LOCK_SPINLOCK; // LOCK_SPINLOCK, LOCK_PTHREAD_MUTEX
	data.xs_which_job = JOB_SNOOZE;
	data.xs_debug_print = FALSE;
	data.xs_progress = 0;
	data.xs_inc = 0;
	lck_mutex_lock_test(self, &data, 10);
	/**/

	xt_spinlock_free(self, &data.xs_spinlock);
	xt_free_mutex(&data.xs_mutex);
}
1936
 
 
1937
 
xtPublic void xt_unit_test_create_threads(XTThreadPtr self)
1938
 
{
1939
 
        XTThreadPtr             threads[10];
1940
 
 
1941
 
        printf("TEST: xt_unit_test_create_threads\n");
1942
 
        printf("current max threads = %d, in use = %d\n", xt_thr_current_max_threads, xt_thr_current_thread_count);
1943
 
 
1944
 
        /* Create some threads: */
1945
 
        threads[0] = xt_create_daemon(self, "test0");
1946
 
        printf("thread = %d\n", threads[0]->t_id);
1947
 
        threads[1] = xt_create_daemon(self, "test1");
1948
 
        printf("thread = %d\n", threads[1]->t_id);
1949
 
        threads[2] = xt_create_daemon(self, "test2");
1950
 
        printf("thread = %d\n", threads[2]->t_id);
1951
 
        threads[3] = xt_create_daemon(self, "test3");
1952
 
        printf("thread = %d\n", threads[3]->t_id);
1953
 
        threads[4] = xt_create_daemon(self, "test4");
1954
 
        printf("thread = %d\n", threads[4]->t_id);
1955
 
        printf("current max threads = %d, in use = %d\n", xt_thr_current_max_threads, xt_thr_current_thread_count);
1956
 
 
1957
 
        /* Max stays the same: */
1958
 
        xt_free_thread(threads[3]);
1959
 
        xt_free_thread(threads[2]);
1960
 
        xt_free_thread(threads[1]);
1961
 
        printf("current max threads = %d, in use = %d\n", xt_thr_current_max_threads, xt_thr_current_thread_count);
1962
 
 
1963
 
        /* Fill in the gaps: */
1964
 
        threads[1] = xt_create_daemon(self, "test1");
1965
 
        printf("thread = %d\n", threads[1]->t_id);
1966
 
        threads[2] = xt_create_daemon(self, "test2");
1967
 
        printf("thread = %d\n", threads[2]->t_id);
1968
 
        threads[3] = xt_create_daemon(self, "test3");
1969
 
        printf("thread = %d\n", threads[3]->t_id);
1970
 
        printf("current max threads = %d, in use = %d\n", xt_thr_current_max_threads, xt_thr_current_thread_count);
1971
 
 
1972
 
        /* And add one: */
1973
 
        threads[5] = xt_create_daemon(self, "test5");
1974
 
        printf("thread = %d\n", threads[5]->t_id);
1975
 
        printf("current max threads = %d, in use = %d\n", xt_thr_current_max_threads, xt_thr_current_thread_count);
1976
 
 
1977
 
        /* Max stays the same: */
1978
 
        xt_free_thread(threads[3]);
1979
 
        xt_free_thread(threads[2]);
1980
 
        xt_free_thread(threads[1]);
1981
 
        xt_free_thread(threads[4]);
1982
 
        printf("current max threads = %d, in use = %d\n", xt_thr_current_max_threads, xt_thr_current_thread_count);
1983
 
 
1984
 
        /* Recalculate the max: */
1985
 
        xt_free_thread(threads[5]);
1986
 
        printf("current max threads = %d, in use = %d\n", xt_thr_current_max_threads, xt_thr_current_thread_count);
1987
 
 
1988
 
        /* Fill in the gaps: */
1989
 
        threads[1] = xt_create_daemon(self, "test1");
1990
 
        printf("thread = %d\n", threads[1]->t_id);
1991
 
        threads[2] = xt_create_daemon(self, "test2");
1992
 
        printf("thread = %d\n", threads[2]->t_id);
1993
 
        threads[3] = xt_create_daemon(self, "test3");
1994
 
        printf("thread = %d\n", threads[3]->t_id);
1995
 
        printf("current max threads = %d, in use = %d\n", xt_thr_current_max_threads, xt_thr_current_thread_count);
1996
 
 
1997
 
        xt_free_thread(threads[3]);
1998
 
        xt_free_thread(threads[2]);
1999
 
        xt_free_thread(threads[1]);
2000
 
        xt_free_thread(threads[0]);
2001
 
        printf("current max threads = %d, in use = %d\n", xt_thr_current_max_threads, xt_thr_current_thread_count);
2002
 
}

#ifdef UNUSED_CODE
int XTRowLocks::xt_release_locks(struct XTOpenTable *ot, xtRowID row, XTRowLockListPtr lock_list)
{
	if (ot->ot_temp_row_lock)
		xt_make_lock_permanent(ot, lock_list);

	if (!lock_list->bl_count)
		return XT_NO_LOCK;

	int					group, pgroup;
	XTXactDataPtr		xact;
	xtTableID			tab_id, ptab_id;
	XTPermRowLockPtr	plock;
	XTOpenTablePtr		pot = NULL;
	XTRowLocksPtr		row_locks;

	/* Do I have the lock? */
	group = row % XT_ROW_LOCK_COUNT;
	if (!(xact = tab_row_locks[group]))
		/* There is no lock: */
		return XT_NO_LOCK;

	if (xact != ot->ot_thread->st_xact_data)
		/* There is a lock but it does not belong to me! */
		return XT_NO_LOCK;

	tab_id = ot->ot_table->tab_id;
	plock = (XTPermRowLockPtr) &lock_list->bl_data[lock_list->bl_count * lock_list->bl_item_size];
	lock_list->rll_release_point = lock_list->bl_count;
	for (u_int i=0; i<lock_list->bl_count; i++) {
		plock--;

		pgroup = plock->pr_group;
		ptab_id = plock->pr_tab_id;

		if (ptab_id == tab_id)
			row_locks = this;
		else {
			if (pot) {
				if (pot->ot_table->tab_id == ptab_id)
					goto remove_lock;
				xt_db_return_table_to_pool_ns(pot);
				pot = NULL;
			}

			/* The lock belongs to another table, so open that table
			 * (ptab_id, not the current table's tab_id):
			 */
			if (!xt_db_open_pool_table_ns(&pot, ot->ot_table->tab_db, ptab_id)) {
				/* Should not happen, but just in case, we just don't
				 * remove the lock. We will probably end up with a deadlock
				 * somewhere.
				 */
				xt_log_and_clear_exception_ns();
				goto skip_remove_lock;
			}
			if (!pot)
				/* Can happen if the table has been dropped: */
				goto skip_remove_lock;

			remove_lock:
			row_locks = &pot->ot_table->tab_locks;
		}

#ifdef XT_TRACE_LOCKS
		xt_ttracef(xt_get_self(), "release lock group=%d\n", pgroup);
#endif
		row_locks->tab_row_locks[pgroup] = NULL;
		row_locks->tab_lock_perm[pgroup] = 0;
		skip_remove_lock:;

		lock_list->rll_release_point--;
		if (tab_id == ptab_id && group == pgroup)
			break;
	}

	if (pot)
		xt_db_return_table_to_pool_ns(pot);
	return XT_PERM_LOCK;
}
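
/* A sketch of the row-to-group mapping used above (illustrative only):
 * every row ID hashes into one of XT_ROW_LOCK_COUNT lock groups, so a
 * row lock in fact covers the row's whole group:
 *
 *   group = row % XT_ROW_LOCK_COUNT;
 *   xact  = tab_row_locks[group];   // transaction holding this group
 *
 * This is why release and regain work group by group rather than row
 * by row.
 */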
2081
 
 
2082
 
xtBool XTRowLocks::xt_regain_locks(struct XTOpenTable *ot, int *lock_type, xtXactID *xn_id, XTRowLockListPtr lock_list)
2083
 
{
2084
 
        int                                     group;
2085
 
        XTXactDataPtr           xact, my_xact;
2086
 
        XTPermRowLockPtr        plock;
2087
 
        xtTableID                       tab_id;
2088
 
        XTOpenTablePtr          pot = NULL;
2089
 
        XTRowLocksPtr           row_locks = NULL;
2090
 
        XTTableHPtr                     tab = NULL;
2091
 
 
2092
 
        for (u_int i=lock_list->rll_release_point; i<lock_list->bl_count; i++) {
2093
 
                plock = (XTPermRowLockPtr) &lock_list->bl_data[i * lock_list->bl_item_size];
2094
 
 
2095
 
                my_xact = ot->ot_thread->st_xact_data;
2096
 
                group = plock->pr_group;
2097
 
                tab_id = plock->pr_tab_id;
2098
 
 
2099
 
                if (tab_id == ot->ot_table->tab_id) {
2100
 
                        row_locks = this;
2101
 
                        tab = ot->ot_table;
2102
 
                }
2103
 
                else {
2104
 
                        if (pot) {
2105
 
                                if (tab_id == pot->ot_table->tab_id)
2106
 
                                        goto gain_lock;
2107
 
                                xt_db_return_table_to_pool_ns(pot);
2108
 
                                pot = NULL;
2109
 
                        }
2110
 
 
2111
 
                        if (!xt_db_open_pool_table_ns(&pot, ot->ot_table->tab_db, tab_id))
2112
 
                                return FAILED;
2113
 
                        if (!pot)
2114
 
                                goto no_gain_lock;
2115
 
                        
2116
 
                        gain_lock:
2117
 
                        tab = pot->ot_table;
2118
 
                        row_locks = &tab->tab_locks;
2119
 
                        no_gain_lock:;
2120
 
                }
2121
 
 
2122
 
#ifdef XT_TRACE_LOCKS
2123
 
                xt_ttracef(xt_get_self(), "regain lock group=%d\n", group);
2124
 
#endif
2125
 
                XT_TAB_ROW_WRITE_LOCK(&tab->tab_row_rwlock[group % XT_ROW_RWLOCKS], ot->ot_thread);
2126
 
                if ((xact = row_locks->tab_row_locks[group])) {
2127
 
                        if (xact != my_xact) {
2128
 
                                *xn_id = xact->xd_start_xn_id;
2129
 
                                *lock_type = row_locks->tab_lock_perm[group] ? XT_PERM_LOCK : XT_TEMP_LOCK;
2130
 
                                goto done;
2131
 
                        }
2132
 
                }
2133
 
                else
2134
 
                        row_locks->tab_row_locks[group] = my_xact;
2135
 
                row_locks->tab_lock_perm[group] = 1;
2136
 
                XT_TAB_ROW_UNLOCK(&tab->tab_row_rwlock[group % XT_ROW_RWLOCKS], ot->ot_thread);
2137
 
                lock_list->rll_release_point++;
2138
 
        }
2139
 
        *lock_type = XT_NO_LOCK;
2140
 
        return OK;
2141
 
 
2142
 
        done:
2143
 
        XT_TAB_ROW_UNLOCK(&tab->tab_row_rwlock[group % XT_ROW_RWLOCKS], ot->ot_thread);
2144
 
        return OK;
2145
 
}
2146
 
 
2147
 
#endif