~drizzle-trunk/drizzle/development

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
/* Copyright (C) 2005 PrimeBase Technologies GmbH
 *
 * PrimeBase XT
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * 2008-01-24	Paul McCullagh
 *
 * Row lock functions.
 *
 * H&G2JCtL
 */
#ifndef __xt_lock_h__
#define __xt_lock_h__

#include "xt_defs.h"
#include "util_xt.h"
#include "locklist_xt.h"
#include "pthread_xt.h"

struct XTThread;
struct XTDatabase;
struct XTOpenTable;
struct XTXactData;
struct XTTable;

#ifdef XT_ATOMIC_SOLARIS_LIB
#include <atomic.h>
#endif

void xt_log_atomic_error_and_abort(c_char *func, c_char *file, u_int line);

/*
 * -----------------------------------------------------------------------
 * ATOMIC OPERATIONS
 */

/*
 * This macro is to remind me where it was safe
 * to use a read lock!
 */
#define xt_lck_slock		xt_spinlock_lock

/* I call these operations flushed because the result
 * is written atomically.
 * But the operations themselves are not atomic!
 */
/* Publish an incremented 1-byte value at *mptr.
 * NOTE: on the x86 paths only the final store (XCHG) is atomic,
 * not the whole read-modify-write (see comment above).
 */
inline void xt_atomic_inc1(volatile xtWord1 *mptr)
{
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV  ECX, mptr
	__asm MOV  DL, BYTE PTR [ECX]
	__asm INC  DL
	__asm XCHG DL, BYTE PTR [ECX]
#elif defined(XT_ATOMIC_GNUC_X86)
	xtWord1 val;

	asm volatile ("movb %1,%0" : "=r" (val) : "m" (*mptr) : "memory");
	val++;
	asm volatile ("xchgb %1,%0" : "=r" (val) : "m" (*mptr), "0" (val) : "memory");
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	atomic_inc_8(mptr);
#else
	/* BUG FIX: was "*mptr++;" which incremented the POINTER and
	 * discarded the dereferenced value, leaving the target byte
	 * unchanged. Increment the pointed-to value, as the other
	 * non-atomic fallbacks in this file do.
	 */
	(*mptr)++;
	/* No atomic implementation is available on this platform: */
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
}

/* Publish a decremented 1-byte value at *mptr and return the new
 * value. As above, only the final store is atomic on the x86 paths,
 * not the whole read-modify-write.
 */
inline xtWord1 xt_atomic_dec1(volatile xtWord1 *mptr)
{
	xtWord1 val;

#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV  ECX, mptr
	__asm MOV  DL, BYTE PTR [ECX]
	__asm DEC  DL
	__asm MOV  val, DL
	__asm XCHG DL, BYTE PTR [ECX]
#elif defined(XT_ATOMIC_GNUC_X86)
	xtWord1 val2;

	asm volatile ("movb %1, %0" : "=r" (val) : "m" (*mptr) : "memory");
	val--;
	/* val2 only receives the exchanged-out byte; the return value is
	 * the decremented copy in val. */
	asm volatile ("xchgb %1,%0" : "=r" (val2) : "m" (*mptr), "0" (val) : "memory");
	/* Should work, but compiler makes a mistake?
	 * asm volatile ("xchgb %1, %0" : : "r" (val), "m" (*mptr) : "memory");
	 */
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	val = atomic_dec_8_nv(mptr);
#else
	/* Non-atomic fallback; logs an error and aborts. */
	val = --(*mptr);
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
	return val;
}

/* Atomically increment a 2-byte word. Unlike the 1-byte variants,
 * this is a true atomic RMW (LOCK INC / __sync_fetch_and_add).
 */
inline void xt_atomic_inc2(volatile xtWord2 *mptr)
{
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV  ECX, mptr
	__asm LOCK INC	WORD PTR [ECX]
#elif defined(XT_ATOMIC_GNUC_X86)
	asm volatile ("lock; incw %0" : : "m" (*mptr) : "memory");
#elif defined(XT_ATOMIC_GCC_OPS)
	__sync_fetch_and_add(mptr, 1);
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	atomic_inc_16_nv(mptr);
#else
	/* Non-atomic fallback; logs an error and aborts. */
	(*mptr)++;
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
}

/* Atomically decrement a 2-byte word (true atomic RMW, see
 * xt_atomic_inc2).
 */
inline void xt_atomic_dec2(volatile xtWord2 *mptr)
{
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV  ECX, mptr
	__asm LOCK DEC	WORD PTR [ECX]
#elif defined(XT_ATOMIC_GNUC_X86)
	asm volatile ("lock; decw %0" : : "m" (*mptr) : "memory");
#elif defined(XT_ATOMIC_GCC_OPS)
	__sync_fetch_and_sub(mptr, 1);
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	atomic_dec_16_nv(mptr);
#else
	/* Non-atomic fallback; logs an error and aborts. */
	--(*mptr);
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
}

/* Atomic test and set 2 byte word! */
/* Atomically store val into *mptr and return the previous contents. */
inline xtWord2 xt_atomic_tas2(volatile xtWord2 *mptr, xtWord2 val)
{
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV  ECX, mptr
	__asm MOV  DX, val
	__asm XCHG DX, WORD PTR [ECX]
	__asm MOV  val, DX
#elif defined(XT_ATOMIC_GNUC_X86)
	asm volatile ("xchgw %1,%0" : "=r" (val) : "m" (*mptr), "0" (val) : "memory");
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	val = atomic_swap_16(mptr, val);
#else
	/* Yikes! Non-atomic fallback; logs an error and aborts. */
	xtWord2 nval = val;

	val = *mptr;
	*mptr = nval;
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
	return val;
}

/* Atomically store a 4-byte word (the exchange result is discarded). */
inline void xt_atomic_set4(volatile xtWord4 *mptr, xtWord4 val)
{
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV  ECX, mptr
	__asm MOV  EDX, val
	__asm XCHG EDX, DWORD PTR [ECX]
	//__asm MOV  DWORD PTR [ECX], EDX
#elif defined(XT_ATOMIC_GNUC_X86)
	asm volatile ("xchgl %1,%0" : "=r" (val) : "m" (*mptr), "0" (val) : "memory");
	//asm volatile ("movl %0,%1" : "=r" (val) : "m" (*mptr) : "memory");
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	atomic_swap_32(mptr, val);
#else
	/* Non-atomic fallback; logs an error and aborts. */
	*mptr = val;
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
}

/* Atomically store a 4-byte word into *mptr and return the previous
 * contents (test-and-set, cf. xt_atomic_tas2).
 */
inline xtWord4 xt_atomic_tas4(volatile xtWord4 *mptr, xtWord4 val)
{
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV  ECX, mptr
	__asm MOV  EDX, val
	__asm XCHG EDX, DWORD PTR [ECX]
	__asm MOV  val, EDX
#elif defined(XT_ATOMIC_GNUC_X86)
	/* (Removed a redundant "val = val;" no-op that preceded the
	 * exchange; the "0" constraint already feeds val in.) */
	asm volatile ("xchgl %1,%0" : "=r" (val) : "m" (*mptr), "0" (val) : "memory");
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	val = atomic_swap_32(mptr, val);
#else
	/* BUG FIX: the fallback previously returned the NEW value instead
	 * of the previous contents, unlike every other branch and unlike
	 * xt_atomic_tas2. Swap properly before aborting.
	 */
	xtWord4 nval = val;

	val = *mptr;
	*mptr = nval;
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
	return val;
}

/*
 * -----------------------------------------------------------------------
 * DIFFERENT TYPES OF LOCKS
 */

/* A test-and-set spin lock. When atomics are unavailable
 * (XT_NO_ATOMICS) it degrades to a plain mutex and does not spin
 * (see xt_spinlock_set/xt_spinlock_reset below).
 */
typedef struct XTSpinLock {
	volatile xtWord4			spl_lock;				/* 0 = free, non-zero = held; set via atomic exchange. */
#ifdef XT_NO_ATOMICS
	xt_mutex_type				spl_mutex;				/* Fallback mutex when no atomic exchange exists. */
#endif
#ifdef DEBUG
	struct XTThread				*spl_locker;			/* Debug only: the thread currently holding the lock. */
#endif
#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec			spl_lock_info;
	const char				    *spl_name;
#endif
} XTSpinLockRec, *XTSpinLockPtr;

#ifdef XT_THREAD_LOCK_INFO
#define xt_spinlock_init_with_autoname(a,b) xt_spinlock_init(a,b,LOCKLIST_ARG_SUFFIX(b))
void	xt_spinlock_init(struct XTThread *self, XTSpinLockPtr sp, const char *name);
#else
#define xt_spinlock_init_with_autoname(a,b) xt_spinlock_init(a,b)
void	xt_spinlock_init(struct XTThread *self, XTSpinLockPtr sp);
#endif
void	xt_spinlock_free(struct XTThread *self, XTSpinLockPtr sp);
xtBool	xt_spinlock_spin(XTSpinLockPtr spl);
#ifdef DEBUG
void	xt_spinlock_set_thread(XTSpinLockPtr spl);
#endif

/* Code for test and set is derived from code by Larry Zhou and
 * Google: http://code.google.com/p/google-perftools
 */
/* Try to acquire the spin lock: atomically exchange 1 into spl_lock
 * and return the PREVIOUS value. A return of 0 means the lock was
 * free and the caller now holds it.
 */
inline xtWord4 xt_spinlock_set(XTSpinLockPtr spl)
{
	xtWord4				prv;
	volatile xtWord4	*lck;
				
	lck = &spl->spl_lock;
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV  ECX, lck
	__asm MOV  EDX, 1
	__asm XCHG EDX, DWORD PTR [ECX]
	__asm MOV  prv, EDX
#elif defined(XT_ATOMIC_GNUC_X86)
	prv = 1;
	asm volatile ("xchgl %1,%0" : "=r" (prv) : "m" (*lck), "0" (prv) : "memory");
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	prv = atomic_swap_32(lck, 1);
#else
	/* The default implementation just uses a mutex, and
	 * does not spin! */
	xt_lock_mutex_ns(&spl->spl_mutex);
	/* We have the lock */
	*lck = 1;
	prv = 0;
#endif
#ifdef DEBUG
	/* prv == 0 means we acquired the lock: record the owner. */
	if (!prv)
		xt_spinlock_set_thread(spl);
#endif
	return prv;
}

/* Release the spin lock: atomically exchange 0 into spl_lock and
 * return the previous value (the mutex fallback always returns 1).
 */
inline xtWord4 xt_spinlock_reset(XTSpinLockPtr spl)
{
	xtWord4				prv;
	volatile xtWord4	*lck;
				
#ifdef DEBUG
	spl->spl_locker = NULL;
#endif
	lck = &spl->spl_lock;
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV  ECX, lck
	__asm MOV  EDX, 0
	__asm XCHG EDX, DWORD PTR [ECX]
	__asm MOV  prv, EDX
#elif defined(XT_ATOMIC_GNUC_X86)
	prv = 0;
	asm volatile ("xchgl %1,%0" : "=r" (prv) : "m" (*lck), "0" (prv) : "memory");
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	prv = atomic_swap_32(lck, 0);
#else
	*lck = 0;
	xt_unlock_mutex_ns(&spl->spl_mutex);
	prv = 1;
#endif
	return prv;
}

/*
 * Return FALSE, and register an error on failure.
 */
/* Acquire the spin lock, spinning (via xt_spinlock_spin) if the fast
 * path fails. Returns FALSE and registers an error on failure. When
 * lock-info tracking is compiled in, the thread is recorded as owner
 * on every successful acquisition.
 */
inline xtBool xt_spinlock_lock(XTSpinLockPtr spl)
{
#ifdef XT_THREAD_LOCK_INFO
	/* Fast path: lock was free. */
	if (xt_spinlock_set(spl) == 0) {
		xt_thread_lock_info_add_owner(&spl->spl_lock_info);
		return OK;
	}
	/* Slow path: spin until acquired (or error). */
	xtBool got_it = xt_spinlock_spin(spl);
	if (got_it)
		xt_thread_lock_info_add_owner(&spl->spl_lock_info);
	return got_it;
#else
	if (xt_spinlock_set(spl) == 0)
		return OK;
	return xt_spinlock_spin(spl);
#endif
}

/* Release the spin lock and, when lock-info tracking is compiled in,
 * deregister this thread as the owner.
 */
inline void xt_spinlock_unlock(XTSpinLockPtr spl)
{
	xt_spinlock_reset(spl);
#ifdef XT_THREAD_LOCK_INFO
	xt_thread_lock_info_release_owner(&spl->spl_lock_info);
#endif
}

#define XT_SXS_SLOCK_COUNT		2

/* A spin-based exclusive/shared (writer/reader) lock.
 * Field semantics inferred from names — see lock_xt.cc for the
 * authoritative implementation.
 */
typedef struct XTSpinXSLock {
	volatile xtWord2			sxs_xlocked;			/* Presumably non-zero while exclusively locked — verify in lock_xt.cc. */
	volatile xtWord2			sxs_xwaiter;
	volatile xtWord2			sxs_rlock_count;		/* Presumably the number of shared (read) locks held. */
	volatile xtWord2			sxs_wait_count;			/* The number of readers waiting for the xlocker. */
#ifdef DEBUG
	xtThreadID					sxs_locker;				/* Debug only: thread ID of the exclusive locker. */
#endif
#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec			sxs_lock_info;
	const char				    *sxs_name;
#endif
} XTSpinXSLockRec, *XTSpinXSLockPtr;

#ifdef XT_THREAD_LOCK_INFO
#define xt_spinxslock_init_with_autoname(a,b) xt_spinxslock_init(a,b,LOCKLIST_ARG_SUFFIX(b))
void xt_spinxslock_init(struct XTThread *self, XTSpinXSLockPtr sxs, const char *name);
#else
#define xt_spinxslock_init_with_autoname(a,b) xt_spinxslock_init(a,b)
void xt_spinxslock_init(struct XTThread *self, XTSpinXSLockPtr sxs);
#endif
void xt_spinxslock_free(struct XTThread *self, XTSpinXSLockPtr sxs);
xtBool xt_spinxslock_xlock(XTSpinXSLockPtr sxs, xtBool try_lock, xtThreadID thd_id);
xtBool xt_spinxslock_slock(XTSpinXSLockPtr sxs);
xtBool xt_spinxslock_unlock(XTSpinXSLockPtr sxs, xtBool xlocked);

/* A mutex/condition-variable based exclusive/shared lock:
 * writers wait on xsm_xcond, readers on xsm_rcond.
 */
typedef struct XTMutexXSLock {
	xt_mutex_type				xsm_lock;				/* Protects the state below. */
	xt_cond_type				xsm_xcond;				/* Writers (exclusive lockers) wait here. */
	xt_cond_type				xsm_rcond;				/* Readers wait here. */
	volatile xtThreadID			xsm_xlocker;			/* Thread ID of the current exclusive locker. */
	volatile xtWord2			xsm_xwait_count;		/* Number of writers waiting. */
	volatile xtWord2			xsm_rlock_count;		/* Number of shared (read) locks held. */
	volatile xtWord2			xsm_rwait_count;			/* The number of readers waiting for the xlocker. */
#ifdef DEBUG
	xtThreadID					xsm_locker;				/* Debug only: recorded locker thread ID. */
#endif
#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec			xsm_lock_info;
	const char				    *xsm_name;
#endif
} XTMutexXSLockRec, *XTMutexXSLockPtr;

#ifdef XT_THREAD_LOCK_INFO
#define xt_xsmutex_init_with_autoname(a,b) xt_xsmutex_init(a,b,LOCKLIST_ARG_SUFFIX(b))
void xt_xsmutex_init(struct XTThread *self, XTMutexXSLockPtr xsm, const char *name);
#else
#define xt_xsmutex_init_with_autoname(a,b) xt_xsmutex_init(a,b)
void xt_xsmutex_init(struct XTThread *self, XTMutexXSLockPtr xsm);
#endif

void xt_xsmutex_free(struct XTThread *self, XTMutexXSLockPtr xsm);
xtBool xt_xsmutex_xlock(XTMutexXSLockPtr xsm, xtThreadID thd_id);
xtBool xt_xsmutex_slock(XTMutexXSLockPtr xsm, xtThreadID thd_id);
xtBool xt_xsmutex_unlock(XTMutexXSLockPtr xsm, xtThreadID thd_id);

void xt_unit_test_read_write_locks(struct XTThread *self);
void xt_unit_test_mutex_locks(struct XTThread *self);
void xt_unit_test_create_threads(struct XTThread *self);

/*
 * -----------------------------------------------------------------------
 * ROW LOCKS
 */

/*
 * [(9)]
 *
 * These are permanent row locks. They are set on rows for 2 reasons:
 *
 * 1. To lock a row that is being updated. The row is locked
 *    when it is read, until the point that it is updated. If the row
 *    is not updated, the lock is removed.
 *    This prevents an update coming between which will cause an error
 *    on the first thread.
 *
 * 2. The locks are used to implement SELECT FOR UPDATE.
 */

/*
 * A lock that is set in order to perform an update is a temporary lock.
 * This lock will be removed once the update of the record is done.
 * The objective is to prevent some other thread from changing the
 * record between the time the record is read and updated. This is to
 * prevent unnecessary "Record was updated" errors.
 *
 * A permanent lock is set by a SELECT FOR UPDATE. These locks are
 * held until the end of the transaction.
 *
 * However, a SELECT FOR UPDATE will pop its lock stack before
 * waiting for a transaction that has updated a record.
 * This is to prevent the deadlock that can occur because a
 * SELECT FOR UPDATE locks groups of records (I mean in general the
 * locks used are group locks).
 *
 * This means a SELECT FOR UPDATE can get ahead of an UPDATE as far as
 * locking is concerned. Example:
 *
 * Record 1,2 and 3 are in group A.
 *
 * T1: UPDATES record 2.
 * T2: SELECT FOR UPDATE record 1, which locks group A.
 * T2: SELECT FOR UPDATE record 2, which must wait for T1.
 * T1: UPDATES record 3, which must wait because of group lock A.
 *
 * To avoid deadlock, T2 releases its group lock A before waiting for
 * record 2. It then regains the lock after waiting for record 2.
 *
 * (NOTE: Locks are no longer released. Please check this comment:
 * {RELEASING-LOCKS} in lock_xt.cc. )
 *
 * However, releasing the group A lock means first releasing all locks
 * gained after the group A lock.
 *
 * For example: a thread locks groups: A, B and C. To release group B
 * lock the thread must release C as well. Afterwards, it must gain
 * B and C again, in that order. This is to ensure that the lock
 * order is NOT changed!
 *
 */
#define XT_LOCK_ERR					-1
#define XT_NO_LOCK					0
#define XT_TEMP_LOCK				1								/* A temporary lock */
#define XT_PERM_LOCK				2								/* A permanent lock */

/* The list of row locks held by a transaction/operation; storage
 * comes from the XTBasicList base.
 */
typedef struct XTRowLockList : public XTBasicList {
	/* Release every lock on the list (see lock_xt.cc). */
	void	xt_remove_all_locks(struct XTDatabase *db, struct XTThread *thread);
} XTRowLockListRec, *XTRowLockListPtr;

#define XT_USE_LIST_BASED_ROW_LOCKS

#ifdef XT_USE_LIST_BASED_ROW_LOCKS
/*
 * This method stores each lock, and avoids conflicts.
 * But it is a bit more expensive in time.
 */

#ifdef DEBUG
#define XT_TEMP_LOCK_BYTES				10
#define XT_ROW_LOCK_GROUP_COUNT			5
#else
#define XT_TEMP_LOCK_BYTES				0xFFFF
#define XT_ROW_LOCK_GROUP_COUNT			23
#endif

/* Describes one thread's pending request for a row lock; queued on
 * the owning lock group's wait queue (lg_wait_queue).
 */
typedef struct XTLockWait {
	/* Information about the lock to be acquired: */
	struct XTThread			*lw_thread;
	struct XTOpenTable		*lw_ot;
	xtRowID					lw_row_id;

	/* This is the lock currently held, and the transaction ID: */
	int						lw_curr_lock;
	xtXactID				lw_xn_id;

	/* This is information about the updating transaction: */
	xtBool					lw_row_updated;
	xtXactID				lw_updating_xn_id;

	/* Pointers for the lock list: */
	struct XTLockWait		*lw_next;
	struct XTLockWait		*lw_prev;
} XTLockWaitRec, *XTLockWaitPtr;

/* One entry in a lock group's sorted list: a run of consecutive
 * locked rows owned by a single thread.
 */
typedef struct XTLockItem {
	xtRowID					li_row_id;				/* The row list is sorted in this value. */
	xtWord2					li_count;				/* The number of consecutive rows locked. FFFF means a temporary lock. */
	xtWord2					li_thread_id;			/* The thread that holds this lock. */
} XTLockItemRec, *XTLockItemPtr;

/* Row locks are partitioned into XT_ROW_LOCK_GROUP_COUNT groups;
 * each group has its own spin lock, wait queue and sorted lock list.
 */
typedef struct XTLockGroup {
	XTSpinLockRec			lg_lock;				/* A lock for the list. */
	XTLockWaitPtr			lg_wait_queue;			/* A queue of threads waiting for a lock in this group. */
	XTLockWaitPtr			lg_wait_queue_end;		/* The end of the thread queue. */
	size_t					lg_list_size;			/* The size of the list. */
	size_t					lg_list_in_use;			/* Number of slots on the list in use. */
	XTLockItemPtr			lg_list;				/* List of locks. */
} XTLockGroupRec, *XTLockGroupPtr;

struct XTLockWait;

/* Per-table row-lock state (list-based implementation): an array of
 * lock groups, plus the operations on them. Implemented in lock_xt.cc.
 */
typedef struct XTRowLocks {
	XTLockGroupRec			rl_groups[XT_ROW_LOCK_GROUP_COUNT];

	/* Cancel a pending (queued) temporary lock request. */
	void	xt_cancel_temp_lock(XTLockWaitPtr lw);
	/* Set a temporary lock on the row described by lw. */
	xtBool	xt_set_temp_lock(struct XTOpenTable *ot, XTLockWaitPtr lw, XTRowLockListPtr lock_list);
	/* Remove a temporary lock; 'updated' tells whether the row was changed. */
	void	xt_remove_temp_lock(struct XTOpenTable *ot, xtBool updated);
	/* Convert a temporary lock to a permanent (SELECT FOR UPDATE) lock. */
	xtBool	xt_make_lock_permanent(struct XTOpenTable *ot, XTRowLockListPtr lock_list);

	/* Internal helpers: lock one row within a group, and grant queued locks. */
	xtBool	rl_lock_row(XTLockGroupPtr group, XTLockWaitPtr lw, XTRowLockListPtr lock_list, int *result, struct XTThread *thread);
	void	rl_grant_locks(XTLockGroupPtr group, struct XTThread *thread);
#ifdef DEBUG_LOCK_QUEUE
	void	rl_check(XTLockWaitPtr lw);
#endif
} XTRowLocksRec, *XTRowLocksPtr;

#define XT_USE_TABLE_REF

/* Records which lock groups of a table hold permanent locks for a
 * transaction (one flag byte per group).
 */
typedef struct XTPermRowLock {
#ifdef XT_USE_TABLE_REF
	struct XTTable			*pr_table;				/* Reference to the locked table. */
#else
	xtTableID				pr_tab_id;				/* ID of the locked table. */
#endif
	xtWord1					pr_group[XT_ROW_LOCK_GROUP_COUNT];
} XTPermRowLockRec, *XTPermRowLockPtr;

#else // XT_USE_LIST_BASED_ROW_LOCKS

/* Hash based row locking. This method can report conflicts
 * even when there are none (false sharing of hash slots).
 */
/* Per-table row-lock state (hash-based implementation): rows hash
 * into XT_ROW_LOCK_COUNT slots, so distinct rows can collide.
 */
typedef struct XTRowLocks {
	xtWord1					tab_lock_perm[XT_ROW_LOCK_COUNT];		/* Byte set to 1 for permanent locks. */
	struct XTXactData		*tab_row_locks[XT_ROW_LOCK_COUNT];		/* The transactions that have locked the specific rows. */

	int		xt_set_temp_lock(struct XTOpenTable *ot, xtRowID row, xtXactID *xn_id, XTRowLockListPtr lock_list);
	void	xt_remove_temp_lock(struct XTOpenTable *ot);
	xtBool	xt_make_lock_permanent(struct XTOpenTable *ot, XTRowLockListPtr lock_list);
	int		xt_is_locked(struct XTOpenTable *ot, xtRowID row, xtXactID *xn_id);
} XTRowLocksRec, *XTRowLocksPtr;

/* Permanent-lock record for the hash-based implementation. */
typedef struct XTPermRowLock {
	xtTableID				pr_tab_id;				/* ID of the locked table. */
	xtWord4					pr_group;				/* Lock group bits — see lock_xt.cc for usage. */
} XTPermRowLockRec, *XTPermRowLockPtr;

#endif // XT_USE_LIST_BASED_ROW_LOCKS

xtBool			xt_init_row_locks(XTRowLocksPtr rl);
void			xt_exit_row_locks(XTRowLocksPtr rl);

xtBool			xt_init_row_lock_list(XTRowLockListPtr rl);
void			xt_exit_row_lock_list(XTRowLockListPtr rl);

#define XT_NO_LOCK				0
#define XT_WANT_LOCK			1
#define XT_HAVE_LOCK			2
#define XT_WAITING				3

/*
 * -----------------------------------------------------------------------
 * RECURSIVE MUTEX (allows lockers to lock again)
 */

/* A recursive mutex: the holding thread may lock it again; a lock
 * count tracks the nesting depth.
 */
typedef struct XTRecursiveMutex {
	struct XTThread				*rm_locker;				/* The thread currently holding the mutex. */
	u_int						rm_lock_count;			/* Nesting depth for the holding thread. */
	xt_mutex_type				rm_mutex;				/* The underlying mutex. */

#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec			rm_lock_info;
	const char				    *rm_name;
#endif
} XTRecursiveMutexRec, *XTRecursiveMutexPtr;

#ifdef XT_THREAD_LOCK_INFO
#define xt_recursivemutex_init_with_autoname(a,b) xt_recursivemutex_init(a,b,LOCKLIST_ARG_SUFFIX(b))
void xt_recursivemutex_init(struct XTThread *self, XTRecursiveMutexPtr rm, const char *name);
#else
#define xt_recursivemutex_init_with_autoname(a,b) xt_recursivemutex_init(a,b)
void xt_recursivemutex_init(struct XTThread *self, XTRecursiveMutexPtr rm);
#endif
void xt_recursivemutex_free(XTRecursiveMutexPtr rm);
void xt_recursivemutex_lock(struct XTThread *self, XTRecursiveMutexPtr rm);
void xt_recursivemutex_unlock(struct XTThread *self, XTRecursiveMutexPtr rm);

/* A recursive read/write lock: the exclusive holder may re-lock;
 * a count tracks the nesting depth.
 */
typedef struct XTRecurRWLock {
	struct XTThread				*rrw_locker;			/* The thread currently holding the exclusive lock. */
	u_int						rrw_lock_count;			/* Nesting depth for the holding thread. */
	xt_rwlock_type				rrw_lock;				/* The underlying read/write lock. */

#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec			rrw_lock_info;
	const char				    *rrw_name;
#endif
} XTRecurRWLockRec, *XTRecurRWLockPtr;

#ifdef XT_THREAD_LOCK_INFO
#define xt_recurrwlock_init_with_autoname(a,b) xt_recurrwlock_init(a,b,LOCKLIST_ARG_SUFFIX(b))
void xt_recurrwlock_init(struct XTThread *self, XTRecurRWLockPtr rrw, const char *name);
#else
#define xt_recurrwlock_init_with_autoname(a,b) xt_recurrwlock_init(a,b)
void xt_recurrwlock_init(struct XTThread *self, XTRecurRWLockPtr rrw);
#endif
void xt_recurrwlock_free(XTRecurRWLockPtr rrw);
void xt_recurrwlock_xlock(struct XTThread *self, XTRecurRWLockPtr rrw);
void xt_recurrwlock_slock(struct XTThread *self, XTRecurRWLockPtr rrw);
void xt_recurrwlock_slock_ns(XTRecurRWLockPtr rrw);
void xt_recurrwlock_unxlock(struct XTThread *self, XTRecurRWLockPtr rrw);
void xt_recurrwlock_unslock(struct XTThread *self, XTRecurRWLockPtr rrw);
void xt_recurrwlock_unslock_ns(XTRecurRWLockPtr rrw);

#endif