/* Copyright (C) 2005 PrimeBase Technologies GmbH
 *
 * PrimeBase XT
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * 2008-01-24 Paul McCullagh
 *
 * Row lock functions.
 *
 * H&G2JCtL
 */
#ifndef __xt_lock_h__
#define __xt_lock_h__

#include "xt_defs.h"
#include "util_xt.h"
#include "locklist_xt.h"
#include "pthread_xt.h"

struct XTThread;
struct XTDatabase;
struct XTOpenTable;
struct XTXactData;
struct XTTable;

#ifdef XT_ATOMIC_SOLARIS_LIB
#include <atomic.h>
#endif

void xt_log_atomic_error_and_abort(c_char *func, c_char *file, u_int line);

/*
 * -----------------------------------------------------------------------
 * ATOMIC OPERATIONS
 */

/*
 * This macro is to remind me where it was safe
 * to use a read lock!
 */
#define xt_lck_slock		xt_spinlock_lock

/* I call these operations "flushed" because the result
 * is written atomically.
 * But the operations themselves are not atomic!
 */
inline void xt_atomic_inc1(volatile xtWord1 *mptr)
{
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV  ECX, mptr
	__asm MOV  DL, BYTE PTR [ECX]
	__asm INC  DL
	__asm XCHG DL, BYTE PTR [ECX]
#elif defined(XT_ATOMIC_GNUC_X86)
	xtWord1 val;

	asm volatile ("movb %1,%0" : "=r" (val) : "m" (*mptr) : "memory");
	val++;
	asm volatile ("xchgb %1,%0" : "=r" (val) : "m" (*mptr), "0" (val) : "memory");
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	atomic_inc_8(mptr);
#else
	(*mptr)++;
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
}
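
/* A minimal usage sketch (illustrative only, not part of this header):
 * the "flushed" increment is only safe when a single thread writes the
 * counter, because the read-modify-write is not atomic; only the final
 * store is.
 *
 *	volatile xtWord1 ref_count = 0;
 *
 *	void single_writer(void)
 *	{
 *		xt_atomic_inc1(&ref_count);	// one writer, many readers: OK
 *	}
 */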

inline xtWord1 xt_atomic_dec1(volatile xtWord1 *mptr)
{
	xtWord1 val;

#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV  ECX, mptr
	__asm MOV  DL, BYTE PTR [ECX]
	__asm DEC  DL
	__asm MOV  val, DL
	__asm XCHG DL, BYTE PTR [ECX]
#elif defined(XT_ATOMIC_GNUC_X86)
	xtWord1 val2;

	asm volatile ("movb %1, %0" : "=r" (val) : "m" (*mptr) : "memory");
	val--;
	asm volatile ("xchgb %1,%0" : "=r" (val2) : "m" (*mptr), "0" (val) : "memory");
	/* Should work, but the compiler makes a mistake?
	 * asm volatile ("xchgb %1, %0" : : "r" (val), "m" (*mptr) : "memory");
	 */
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	val = atomic_dec_8_nv(mptr);
#else
	val = --(*mptr);
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
	return val;
}

inline void xt_atomic_inc2(volatile xtWord2 *mptr)
{
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV  ECX, mptr
	__asm LOCK INC WORD PTR [ECX]
#elif defined(XT_ATOMIC_GNUC_X86)
	asm volatile ("lock; incw %0" : : "m" (*mptr) : "memory");
#elif defined(XT_ATOMIC_GCC_OPS)
	__sync_fetch_and_add(mptr, 1);
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	atomic_inc_16_nv(mptr);
#else
	(*mptr)++;
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
}

inline void xt_atomic_dec2(volatile xtWord2 *mptr)
{
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV  ECX, mptr
	__asm LOCK DEC WORD PTR [ECX]
#elif defined(XT_ATOMIC_GNUC_X86)
	asm volatile ("lock; decw %0" : : "m" (*mptr) : "memory");
#elif defined(XT_ATOMIC_GCC_OPS)
	__sync_fetch_and_sub(mptr, 1);
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	atomic_dec_16_nv(mptr);
#else
	--(*mptr);
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
}

/* Atomic test and set 2-byte word! */
inline xtWord2 xt_atomic_tas2(volatile xtWord2 *mptr, xtWord2 val)
{
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV  ECX, mptr
	__asm MOV  DX, val
	__asm XCHG DX, WORD PTR [ECX]
	__asm MOV  val, DX
#elif defined(XT_ATOMIC_GNUC_X86)
	asm volatile ("xchgw %1,%0" : "=r" (val) : "m" (*mptr), "0" (val) : "memory");
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	val = atomic_swap_16(mptr, val);
#else
	/* Yikes! */
	xtWord2 nval = val;

	val = *mptr;
	*mptr = nval;
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
	return val;
}

inline void xt_atomic_set4(volatile xtWord4 *mptr, xtWord4 val)
{
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV  ECX, mptr
	__asm MOV  EDX, val
	__asm XCHG EDX, DWORD PTR [ECX]
	//__asm MOV  DWORD PTR [ECX], EDX
#elif defined(XT_ATOMIC_GNUC_X86)
	asm volatile ("xchgl %1,%0" : "=r" (val) : "m" (*mptr), "0" (val) : "memory");
	//asm volatile ("movl %0,%1" : "=r" (val) : "m" (*mptr) : "memory");
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	atomic_swap_32(mptr, val);
#else
	*mptr = val;
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
}

inline xtWord4 xt_atomic_tas4(volatile xtWord4 *mptr, xtWord4 val)
{
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV  ECX, mptr
	__asm MOV  EDX, val
	__asm XCHG EDX, DWORD PTR [ECX]
	__asm MOV  val, EDX
#elif defined(XT_ATOMIC_GNUC_X86)
	asm volatile ("xchgl %1,%0" : "=r" (val) : "m" (*mptr), "0" (val) : "memory");
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	val = atomic_swap_32(mptr, val);
#else
	*mptr = val;
	xt_log_atomic_error_and_abort(__FUNC__, __FILE__, __LINE__);
#endif
	return val;
}

/*
 * -----------------------------------------------------------------------
 * DIFFERENT TYPES OF LOCKS
 */

typedef struct XTSpinLock {
	volatile xtWord4		spl_lock;
#ifdef XT_NO_ATOMICS
	xt_mutex_type			spl_mutex;
#endif
#ifdef DEBUG
	struct XTThread			*spl_locker;
#endif
#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec		spl_lock_info;
	const char				*spl_name;
#endif
} XTSpinLockRec, *XTSpinLockPtr;

#ifdef XT_THREAD_LOCK_INFO
#define xt_spinlock_init_with_autoname(a,b) xt_spinlock_init(a,b,LOCKLIST_ARG_SUFFIX(b))
void xt_spinlock_init(struct XTThread *self, XTSpinLockPtr sp, const char *name);
#else
#define xt_spinlock_init_with_autoname(a,b) xt_spinlock_init(a,b)
void xt_spinlock_init(struct XTThread *self, XTSpinLockPtr sp);
#endif
void xt_spinlock_free(struct XTThread *self, XTSpinLockPtr sp);
xtBool xt_spinlock_spin(XTSpinLockPtr spl);
#ifdef DEBUG
void xt_spinlock_set_thread(XTSpinLockPtr spl);
#endif

/* Code for test and set is derived from code by Larry Zhou and
 * Google: http://code.google.com/p/google-perftools
 */
inline xtWord4 xt_spinlock_set(XTSpinLockPtr spl)
{
	xtWord4				prv;
	volatile xtWord4	*lck;

	lck = &spl->spl_lock;
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV  ECX, lck
	__asm MOV  EDX, 1
	__asm XCHG EDX, DWORD PTR [ECX]
	__asm MOV  prv, EDX
#elif defined(XT_ATOMIC_GNUC_X86)
	prv = 1;
	asm volatile ("xchgl %1,%0" : "=r" (prv) : "m" (*lck), "0" (prv) : "memory");
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	prv = atomic_swap_32(lck, 1);
#else
	/* The default implementation just uses a mutex, and
	 * does not spin! */
	xt_lock_mutex_ns(&spl->spl_mutex);
	/* We have the lock */
	*lck = 1;
	prv = 0;
#endif
#ifdef DEBUG
	if (!prv)
		xt_spinlock_set_thread(spl);
#endif
	return prv;
}

inline xtWord4 xt_spinlock_reset(XTSpinLockPtr spl)
{
	xtWord4				prv;
	volatile xtWord4	*lck;

#ifdef DEBUG
	spl->spl_locker = NULL;
#endif
	lck = &spl->spl_lock;
#ifdef XT_ATOMIC_WIN32_X86
	__asm MOV  ECX, lck
	__asm MOV  EDX, 0
	__asm XCHG EDX, DWORD PTR [ECX]
	__asm MOV  prv, EDX
#elif defined(XT_ATOMIC_GNUC_X86)
	prv = 0;
	asm volatile ("xchgl %1,%0" : "=r" (prv) : "m" (*lck), "0" (prv) : "memory");
#elif defined(XT_ATOMIC_SOLARIS_LIB)
	prv = atomic_swap_32(lck, 0);
#else
	*lck = 0;
	xt_unlock_mutex_ns(&spl->spl_mutex);
	prv = 1;
#endif
	return prv;
}

/*
 * Return FALSE, and register an error on failure.
 */
inline xtBool xt_spinlock_lock(XTSpinLockPtr spl)
{
	if (!xt_spinlock_set(spl)) {
#ifdef XT_THREAD_LOCK_INFO
		xt_thread_lock_info_add_owner(&spl->spl_lock_info);
#endif
		return OK;
	}
#ifdef XT_THREAD_LOCK_INFO
	xtBool spin_result = xt_spinlock_spin(spl);

	if (spin_result)
		xt_thread_lock_info_add_owner(&spl->spl_lock_info);
	return spin_result;
#else
	return xt_spinlock_spin(spl);
#endif
}

inline void xt_spinlock_unlock(XTSpinLockPtr spl)
{
	xt_spinlock_reset(spl);
#ifdef XT_THREAD_LOCK_INFO
	xt_thread_lock_info_release_owner(&spl->spl_lock_info);
#endif
}
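
/* Usage sketch (illustrative only; `self` stands for the calling
 * thread's context, however the caller obtains it):
 *
 *	XTSpinLockRec spl;
 *
 *	xt_spinlock_init_with_autoname(self, &spl);
 *	if (xt_spinlock_lock(&spl)) {		// returns OK on success
 *		... short critical section ...
 *		xt_spinlock_unlock(&spl);
 *	}
 *	xt_spinlock_free(self, &spl);
 */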

#define XT_SXS_SLOCK_COUNT		2

typedef struct XTSpinXSLock {
	volatile xtWord2		sxs_xlocked;
	volatile xtWord2		sxs_xwaiter;
	volatile xtWord2		sxs_rlock_count;
	volatile xtWord2		sxs_wait_count;		/* The number of readers waiting for the xlocker. */
#ifdef DEBUG
	xtThreadID				sxs_locker;
#endif
#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec		sxs_lock_info;
	const char				*sxs_name;
#endif
} XTSpinXSLockRec, *XTSpinXSLockPtr;

#ifdef XT_THREAD_LOCK_INFO
#define xt_spinxslock_init_with_autoname(a,b) xt_spinxslock_init(a,b,LOCKLIST_ARG_SUFFIX(b))
void xt_spinxslock_init(struct XTThread *self, XTSpinXSLockPtr sxs, const char *name);
#else
#define xt_spinxslock_init_with_autoname(a,b) xt_spinxslock_init(a,b)
void xt_spinxslock_init(struct XTThread *self, XTSpinXSLockPtr sxs);
#endif
void xt_spinxslock_free(struct XTThread *self, XTSpinXSLockPtr sxs);
xtBool xt_spinxslock_xlock(XTSpinXSLockPtr sxs, xtBool try_lock, xtThreadID thd_id);
xtBool xt_spinxslock_slock(XTSpinXSLockPtr sxs);
xtBool xt_spinxslock_unlock(XTSpinXSLockPtr sxs, xtBool xlocked);
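
/* Usage sketch (illustrative only; `self` and `thd_id` stand for the
 * calling thread and its ID, however the caller obtains them):
 *
 *	XTSpinXSLockRec sxs;
 *
 *	xt_spinxslock_init_with_autoname(self, &sxs);
 *	if (xt_spinxslock_slock(&sxs)) {			// shared (read) lock
 *		... read the shared structure ...
 *		xt_spinxslock_unlock(&sxs, FALSE);		// FALSE: held shared
 *	}
 *	if (xt_spinxslock_xlock(&sxs, FALSE, thd_id)) {	// exclusive, blocking
 *		... modify the shared structure ...
 *		xt_spinxslock_unlock(&sxs, TRUE);		// TRUE: held exclusive
 *	}
 *	xt_spinxslock_free(self, &sxs);
 */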

typedef struct XTMutexXSLock {
	xt_mutex_type			xsm_lock;
	xt_cond_type			xsm_xcond;
	xt_cond_type			xsm_rcond;
	volatile xtThreadID		xsm_xlocker;
	volatile xtWord2		xsm_xwait_count;
	volatile xtWord2		xsm_rlock_count;
	volatile xtWord2		xsm_rwait_count;	/* The number of readers waiting for the xlocker. */
#ifdef DEBUG
	xtThreadID				xsm_locker;
#endif
#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec		xsm_lock_info;
	const char				*xsm_name;
#endif
} XTMutexXSLockRec, *XTMutexXSLockPtr;

#ifdef XT_THREAD_LOCK_INFO
#define xt_xsmutex_init_with_autoname(a,b) xt_xsmutex_init(a,b,LOCKLIST_ARG_SUFFIX(b))
void xt_xsmutex_init(struct XTThread *self, XTMutexXSLockPtr xsm, const char *name);
#else
#define xt_xsmutex_init_with_autoname(a,b) xt_xsmutex_init(a,b)
void xt_xsmutex_init(struct XTThread *self, XTMutexXSLockPtr xsm);
#endif

void xt_xsmutex_free(struct XTThread *self, XTMutexXSLockPtr xsm);
xtBool xt_xsmutex_xlock(XTMutexXSLockPtr xsm, xtThreadID thd_id);
xtBool xt_xsmutex_slock(XTMutexXSLockPtr xsm, xtThreadID thd_id);
xtBool xt_xsmutex_unlock(XTMutexXSLockPtr xsm, xtThreadID thd_id);
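
/* The mutex-based XS lock fills the same shared/exclusive role as
 * XTSpinXSLock, but waits on condition variables (xsm_xcond/xsm_rcond)
 * instead of spinning, so it is better suited to longer waits. Usage
 * mirrors the sketch above (illustrative only):
 *
 *	if (xt_xsmutex_slock(&xsm, thd_id)) {
 *		... read ...
 *		xt_xsmutex_unlock(&xsm, thd_id);
 *	}
 */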

void xt_unit_test_read_write_locks(struct XTThread *self);
void xt_unit_test_mutex_locks(struct XTThread *self);
void xt_unit_test_create_threads(struct XTThread *self);

/*
 * -----------------------------------------------------------------------
 * ROW LOCKS
 */

/*
 * [(9)]
 *
 * These are permanent row locks. They are set on rows for 2 reasons:
 *
 * 1. To lock a row that is being updated. The row is locked
 * when it is read, until the point that it is updated. If the row
 * is not updated, the lock is removed.
 * This prevents an update from coming in between, which would cause
 * an error on the first thread.
 *
 * 2. The locks are used to implement SELECT FOR UPDATE.
 */

/*
 * A lock that is set in order to perform an update is a temporary lock.
 * This lock will be removed once the update of the record is done.
 * The objective is to prevent some other thread from changing the
 * record between the time the record is read and updated. This is to
 * prevent unnecessary "Record was updated" errors.
 *
 * A permanent lock is set by a SELECT FOR UPDATE. These locks are
 * held until the end of the transaction.
 *
 * However, a SELECT FOR UPDATE will pop its lock stack before
 * waiting for a transaction that has updated a record.
 * This is to prevent the deadlock that can occur because a
 * SELECT FOR UPDATE locks groups of records (in general, the
 * locks used are group locks).
 *
 * This means a SELECT FOR UPDATE can get ahead of an UPDATE as far as
 * locking is concerned. Example:
 *
 * Records 1, 2 and 3 are in group A.
 *
 * T1: UPDATES record 2.
 * T2: SELECT FOR UPDATE record 1, which locks group A.
 * T2: SELECT FOR UPDATE record 2, which must wait for T1.
 * T1: UPDATES record 3, which must wait because of group lock A.
 *
 * To avoid deadlock, T2 releases its group lock A before waiting for
 * record 2. It then regains the lock after waiting for record 2.
 *
 * (NOTE: Locks are no longer released. Please check this comment:
 * {RELEASING-LOCKS} in lock_xt.cc.)
 *
 * However, releasing the group A lock means first releasing all locks
 * gained after the group A lock.
 *
 * For example: a thread locks groups A, B and C. To release group B's
 * lock the thread must release C's as well. Afterwards, it must gain
 * B and C again, in that order. This is to ensure that the lock
 * order is NOT changed!
 */

#define XT_LOCK_ERR			-1
#define XT_NO_LOCK			0
#define XT_TEMP_LOCK		1		/* A temporary lock */
#define XT_PERM_LOCK		2		/* A permanent lock */

typedef struct XTRowLockList : public XTBasicList {
	void	xt_remove_all_locks(struct XTDatabase *db, struct XTThread *thread);
} XTRowLockListRec, *XTRowLockListPtr;

#define XT_USE_LIST_BASED_ROW_LOCKS

#ifdef XT_USE_LIST_BASED_ROW_LOCKS
/*
 * This method stores each lock, and avoids conflicts.
 * But it is a bit more expensive in time.
 */

#ifdef DEBUG
#define XT_TEMP_LOCK_BYTES			10
#define XT_ROW_LOCK_GROUP_COUNT		5
#else
#define XT_TEMP_LOCK_BYTES			0xFFFF
#define XT_ROW_LOCK_GROUP_COUNT		23
#endif

typedef struct XTLockWait {
	/* Information about the lock to be acquired: */
	struct XTThread		*lw_thread;
	struct XTOpenTable	*lw_ot;
	xtRowID				lw_row_id;

	/* This is the lock currently held, and the transaction ID: */
	int					lw_curr_lock;
	xtXactID			lw_xn_id;

	/* This is information about the updating transaction: */
	xtBool				lw_row_updated;
	xtXactID			lw_updating_xn_id;

	/* Pointers for the lock list: */
	struct XTLockWait	*lw_next;
	struct XTLockWait	*lw_prev;
} XTLockWaitRec, *XTLockWaitPtr;

typedef struct XTLockItem {
	xtRowID				li_row_id;		/* The row list is sorted on this value. */
	xtWord2				li_count;		/* The number of consecutive rows locked. FFFF means a temporary lock. */
	xtWord2				li_thread_id;	/* The thread that holds this lock. */
} XTLockItemRec, *XTLockItemPtr;

typedef struct XTLockGroup {
	XTSpinLockRec		lg_lock;			/* A lock for the list. */
	XTLockWaitPtr		lg_wait_queue;		/* A queue of threads waiting for a lock in this group. */
	XTLockWaitPtr		lg_wait_queue_end;	/* The end of the thread queue. */
	size_t				lg_list_size;		/* The size of the list. */
	size_t				lg_list_in_use;		/* Number of slots on the list in use. */
	XTLockItemPtr		lg_list;			/* List of locks. */
} XTLockGroupRec, *XTLockGroupPtr;

struct XTLockWait;

typedef struct XTRowLocks {
	XTLockGroupRec		rl_groups[XT_ROW_LOCK_GROUP_COUNT];

	void	xt_cancel_temp_lock(XTLockWaitPtr lw);
	xtBool	xt_set_temp_lock(struct XTOpenTable *ot, XTLockWaitPtr lw, XTRowLockListPtr lock_list);
	void	xt_remove_temp_lock(struct XTOpenTable *ot, xtBool updated);
	xtBool	xt_make_lock_permanent(struct XTOpenTable *ot, XTRowLockListPtr lock_list);

	xtBool	rl_lock_row(XTLockGroupPtr group, XTLockWaitPtr lw, XTRowLockListPtr lock_list, int *result, struct XTThread *thread);
	void	rl_grant_locks(XTLockGroupPtr group, struct XTThread *thread);
#ifdef DEBUG_LOCK_QUEUE
	void	rl_check(XTLockWaitPtr lw);
#endif
} XTRowLocksRec, *XTRowLocksPtr;
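
/* How a row is mapped to a group is decided in lock_xt.cc; presumably
 * the row ID is hashed onto one of the XT_ROW_LOCK_GROUP_COUNT groups,
 * along these lines (illustrative only):
 *
 *	XTLockGroupPtr group = &rl->rl_groups[row_id % XT_ROW_LOCK_GROUP_COUNT];
 */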

#define XT_USE_TABLE_REF

typedef struct XTPermRowLock {
#ifdef XT_USE_TABLE_REF
	struct XTTable		*pr_table;
#else
	xtTableID			pr_tab_id;
#endif
	xtWord1				pr_group[XT_ROW_LOCK_GROUP_COUNT];
} XTPermRowLockRec, *XTPermRowLockPtr;

#else // XT_USE_LIST_BASED_ROW_LOCKS

/* Hash-based row locking. This method allows conflicts, even
 * when there are none.
 */
typedef struct XTRowLocks {
	xtWord1				tab_lock_perm[XT_ROW_LOCK_COUNT];		/* Byte set to 1 for permanent locks. */
	struct XTXactData	*tab_row_locks[XT_ROW_LOCK_COUNT];		/* The transactions that have locked the specific rows. */

	int		xt_set_temp_lock(struct XTOpenTable *ot, xtRowID row, xtXactID *xn_id, XTRowLockListPtr lock_list);
	void	xt_remove_temp_lock(struct XTOpenTable *ot);
	xtBool	xt_make_lock_permanent(struct XTOpenTable *ot, XTRowLockListPtr lock_list);
	int		xt_is_locked(struct XTOpenTable *ot, xtRowID row, xtXactID *xn_id);
} XTRowLocksRec, *XTRowLocksPtr;

typedef struct XTPermRowLock {
	xtTableID			pr_tab_id;
	xtWord4				pr_group;
} XTPermRowLockRec, *XTPermRowLockPtr;

#endif // XT_USE_LIST_BASED_ROW_LOCKS

xtBool xt_init_row_locks(XTRowLocksPtr rl);
void xt_exit_row_locks(XTRowLocksPtr rl);

xtBool xt_init_row_lock_list(XTRowLockListPtr rl);
void xt_exit_row_lock_list(XTRowLockListPtr rl);

#define XT_NO_LOCK		0
#define XT_WANT_LOCK	1
#define XT_HAVE_LOCK	2
#define XT_WAITING		3

/*
 * -----------------------------------------------------------------------
 * RECURSIVE MUTEX (allows lockers to lock again)
 */

typedef struct XTRecursiveMutex {
	struct XTThread			*rm_locker;
	u_int					rm_lock_count;
	xt_mutex_type			rm_mutex;

#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec		rm_lock_info;
	const char				*rm_name;
#endif
} XTRecursiveMutexRec, *XTRecursiveMutexPtr;

#ifdef XT_THREAD_LOCK_INFO
#define xt_recursivemutex_init_with_autoname(a,b) xt_recursivemutex_init(a,b,LOCKLIST_ARG_SUFFIX(b))
void xt_recursivemutex_init(struct XTThread *self, XTRecursiveMutexPtr rm, const char *name);
#else
#define xt_recursivemutex_init_with_autoname(a,b) xt_recursivemutex_init(a,b)
void xt_recursivemutex_init(struct XTThread *self, XTRecursiveMutexPtr rm);
#endif
void xt_recursivemutex_free(XTRecursiveMutexPtr rm);
void xt_recursivemutex_lock(struct XTThread *self, XTRecursiveMutexPtr rm);
void xt_recursivemutex_unlock(struct XTThread *self, XTRecursiveMutexPtr rm);
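
/* Usage sketch (illustrative only): the holder may lock again without
 * deadlocking; each lock call must be paired with an unlock.
 *
 *	XTRecursiveMutexRec rm;
 *
 *	xt_recursivemutex_init_with_autoname(self, &rm);
 *	xt_recursivemutex_lock(self, &rm);
 *	xt_recursivemutex_lock(self, &rm);		// recursive: does not block
 *	xt_recursivemutex_unlock(self, &rm);
 *	xt_recursivemutex_unlock(self, &rm);	// released here
 *	xt_recursivemutex_free(&rm);
 */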

typedef struct XTRecurRWLock {
	struct XTThread			*rrw_locker;
	u_int					rrw_lock_count;
	xt_rwlock_type			rrw_lock;

#ifdef XT_THREAD_LOCK_INFO
	XTThreadLockInfoRec		rrw_lock_info;
	const char				*rrw_name;
#endif
} XTRecurRWLockRec, *XTRecurRWLockPtr;

#ifdef XT_THREAD_LOCK_INFO
#define xt_recurrwlock_init_with_autoname(a,b) xt_recurrwlock_init(a,b,LOCKLIST_ARG_SUFFIX(b))
void xt_recurrwlock_init(struct XTThread *self, XTRecurRWLockPtr rrw, const char *name);
#else
#define xt_recurrwlock_init_with_autoname(a,b) xt_recurrwlock_init(a,b)
void xt_recurrwlock_init(struct XTThread *self, XTRecurRWLockPtr rrw);
#endif
void xt_recurrwlock_free(XTRecurRWLockPtr rrw);
void xt_recurrwlock_xlock(struct XTThread *self, XTRecurRWLockPtr rrw);
void xt_recurrwlock_slock(struct XTThread *self, XTRecurRWLockPtr rrw);
void xt_recurrwlock_slock_ns(XTRecurRWLockPtr rrw);
void xt_recurrwlock_unxlock(struct XTThread *self, XTRecurRWLockPtr rrw);
void xt_recurrwlock_unslock(struct XTThread *self, XTRecurRWLockPtr rrw);
void xt_recurrwlock_unslock_ns(XTRecurRWLockPtr rrw);

#endif