1
by brian
clean slate |
1 |
/******************************************************
|
2 |
Mutex, the basic synchronization primitive |
|
3 |
||
4 |
(c) 1995 Innobase Oy |
|
5 |
||
6 |
Created 9/5/1995 Heikki Tuuri |
|
7 |
*******************************************************/
|
|
8 |
||
9 |
#if defined(not_defined) && defined(__GNUC__) && defined(UNIV_INTEL_X86)
|
|
10 |
/* %z0: Use the size of operand %0 which in our case is *m to determine |
|
11 |
instruction size, it should end up as xchgl. "1" in the input constraint, |
|
12 |
says that "in" has to go in the same place as "out".*/ |
|
13 |
#define TAS(m, in, out) \
|
|
14 |
asm volatile ("xchg%z0 %2, %0" \ |
|
15 |
: "=g" (*(m)), "=r" (out) \ |
|
16 |
: "1" (in)) /* Note: "1" here refers to "=r" (out) */ |
|
17 |
#endif
|
|
18 |
||
19 |
/**********************************************************************
Sets the waiters field in a mutex. Declared here; defined in the
corresponding .c file. */

void
mutex_set_waiters(
/*==============*/
	mutex_t*	mutex,	/* in: mutex */
	ulint		n);	/* in: value to set */
|
27 |
/**********************************************************************
Reserves a mutex for the current thread. If the mutex is reserved, the
function spins a preset time (controlled by SYNC_SPIN_ROUNDS) waiting
for the mutex before suspending the thread. The file name and line are
recorded for diagnostics of where the mutex was requested. */

void
mutex_spin_wait(
/*============*/
	mutex_t*	mutex,		/* in: pointer to mutex */
	const char*	file_name,	/* in: file name where mutex
					requested */
	ulint		line);		/* in: line where requested */
|
39 |
#ifdef UNIV_SYNC_DEBUG
|
|
40 |
/**********************************************************************
|
|
41 |
Sets the debug information for a reserved mutex. */ |
|
42 |
||
43 |
void
|
|
44 |
mutex_set_debug_info( |
|
45 |
/*=================*/
|
|
46 |
mutex_t* mutex, /* in: mutex */ |
|
47 |
const char* file_name, /* in: file where requested */ |
|
48 |
ulint line); /* in: line where requested */ |
|
49 |
#endif /* UNIV_SYNC_DEBUG */
|
|
50 |
/**********************************************************************
|
|
51 |
Releases the threads waiting in the primary wait array for this mutex. */ |
|
52 |
||
53 |
void
|
|
54 |
mutex_signal_object( |
|
55 |
/*================*/
|
|
56 |
mutex_t* mutex); /* in: mutex */ |
|
57 |
||
58 |
/**********************************************************************
Performs an atomic test-and-set instruction to the lock_word field of a
mutex: atomically writes 1 and returns the previous value. A return of 0
means this caller acquired the lock. */
UNIV_INLINE
ulint
mutex_test_and_set(
/*===============*/
				/* out: the previous value of lock_word: 0 or
				1 */
	mutex_t*	mutex)	/* in: mutex */
{
#if defined(_WIN32) && defined(UNIV_CAN_USE_X86_ASSEMBLER)
	ulint	res;
	ulint*	lw;		/* assembler code is used to ensure that
				lock_word is loaded from memory */
	ut_ad(mutex);
	ut_ad(sizeof(ulint) == 4);	/* the 32-bit asm below requires it */

	lw = &(mutex->lock_word);

	/* XCHG with a memory operand is implicitly LOCKed: this swap is
	atomic and leaves the old lock_word in EDX. */
	__asm	MOV	ECX, lw
	__asm	MOV	EDX, 1
	__asm	XCHG	EDX, DWORD PTR [ECX]
	__asm	MOV	res, EDX

	/* The fence below would prevent this thread from
	reading the data structure protected by the mutex
	before the test-and-set operation is committed, but
	the fence is apparently not needed:

	In a posting to comp.arch newsgroup (August 10, 1997)
	Andy Glew said that in P6 a LOCKed instruction like
	XCHG establishes a fence with respect to memory reads
	and writes and thus an explicit fence is not
	needed. In P5 he seemed to agree with a previous
	newsgroup poster that LOCKed instructions serialize
	all instruction execution, and, consequently, also
	memory operations. This is confirmed in Intel Software
	Dev. Manual, Vol. 3. */

	/* mutex_fence(); */

	return(res);
#elif defined(not_defined) && defined(__GNUC__) && defined(UNIV_INTEL_X86)
	/* NOTE: dead branch (not_defined); see the TAS macro above. */
	ulint	res;

	TAS(&mutex->lock_word, 1, res);

	return(res);
#else
	/* Portable fallback: emulate test-and-set with the OS fast mutex. */
	ibool	ret;

	ret = os_fast_mutex_trylock(&(mutex->os_fast_mutex));

	if (ret == 0) {
		/* We check that os_fast_mutex_trylock does not leak
		and allow race conditions */
		ut_a(mutex->lock_word == 0);

		mutex->lock_word = 1;
	}

	return(ret);
#endif
}
|
|
123 |
||
124 |
/**********************************************************************
|
|
125 |
Performs a reset instruction to the lock_word field of a mutex. This |
|
126 |
instruction also serializes memory operations to the program order. */ |
|
127 |
UNIV_INLINE
|
|
128 |
void
|
|
129 |
mutex_reset_lock_word( |
|
130 |
/*==================*/
|
|
131 |
mutex_t* mutex) /* in: mutex */ |
|
132 |
{
|
|
133 |
#if defined(_WIN32) && defined(UNIV_CAN_USE_X86_ASSEMBLER)
|
|
134 |
ulint* lw; /* assembler code is used to ensure that |
|
135 |
lock_word is loaded from memory */ |
|
136 |
ut_ad(mutex); |
|
137 |
||
138 |
lw = &(mutex->lock_word); |
|
139 |
||
140 |
__asm MOV EDX, 0 |
|
141 |
__asm MOV ECX, lw |
|
142 |
__asm XCHG EDX, DWORD PTR [ECX] |
|
143 |
#elif defined(not_defined) && defined(__GNUC__) && defined(UNIV_INTEL_X86)
|
|
144 |
ulint res; |
|
145 |
||
146 |
TAS(&mutex->lock_word, 0, res); |
|
147 |
#else
|
|
148 |
mutex->lock_word = 0; |
|
149 |
||
150 |
os_fast_mutex_unlock(&(mutex->os_fast_mutex)); |
|
151 |
#endif
|
|
152 |
}
|
|
153 |
||
154 |
/**********************************************************************
|
|
155 |
Gets the value of the lock word. */ |
|
156 |
UNIV_INLINE
|
|
157 |
ulint
|
|
158 |
mutex_get_lock_word( |
|
159 |
/*================*/
|
|
160 |
const mutex_t* mutex) /* in: mutex */ |
|
161 |
{
|
|
162 |
const volatile ulint* ptr; /* declared volatile to ensure that |
|
163 |
lock_word is loaded from memory */ |
|
164 |
ut_ad(mutex); |
|
165 |
||
166 |
ptr = &(mutex->lock_word); |
|
167 |
||
168 |
return(*ptr); |
|
169 |
}
|
|
170 |
||
171 |
/**********************************************************************
|
|
172 |
Gets the waiters field in a mutex. */ |
|
173 |
UNIV_INLINE
|
|
174 |
ulint
|
|
175 |
mutex_get_waiters( |
|
176 |
/*==============*/
|
|
177 |
/* out: value to set */ |
|
178 |
const mutex_t* mutex) /* in: mutex */ |
|
179 |
{
|
|
180 |
const volatile ulint* ptr; /* declared volatile to ensure that |
|
181 |
the value is read from memory */ |
|
182 |
ut_ad(mutex); |
|
183 |
||
184 |
ptr = &(mutex->waiters); |
|
185 |
||
186 |
return(*ptr); /* Here we assume that the read of a single |
|
187 |
word from memory is atomic */ |
|
188 |
}
|
|
189 |
||
190 |
/**********************************************************************
Unlocks a mutex owned by the current thread: clears the debug owner id,
resets the lock word, and wakes any waiters. */
UNIV_INLINE
void
mutex_exit(
/*=======*/
	mutex_t*	mutex)	/* in: pointer to mutex */
{
	ut_ad(mutex_own(mutex));

	ut_d(mutex->thread_id = ULINT_UNDEFINED);

#ifdef UNIV_SYNC_DEBUG
	sync_thread_reset_level(mutex);
#endif
	mutex_reset_lock_word(mutex);

	/* A problem: we assume that mutex_reset_lock word
	is a memory barrier, that is when we read the waiters
	field next, the read must be serialized in memory
	after the reset. A speculative processor might
	perform the read first, which could leave a waiting
	thread hanging indefinitely.

	Our current solution is to call
	sync_arr_wake_threads_if_sema_free() every 10 seconds
	to wake up possible hanging threads if
	they are missed in mutex_signal_object. */

	if (mutex_get_waiters(mutex) != 0) {

		mutex_signal_object(mutex);
	}

#ifdef UNIV_SYNC_PERF_STAT
	mutex_exit_count++;
#endif
}
|
|
228 |
||
229 |
/**********************************************************************
|
|
230 |
Locks a mutex for the current thread. If the mutex is reserved, the function |
|
231 |
spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting for the mutex |
|
232 |
before suspending the thread. */ |
|
233 |
UNIV_INLINE
|
|
234 |
void
|
|
235 |
mutex_enter_func( |
|
236 |
/*=============*/
|
|
237 |
mutex_t* mutex, /* in: pointer to mutex */ |
|
238 |
const char* file_name, /* in: file name where locked */ |
|
239 |
ulint line) /* in: line where locked */ |
|
240 |
{
|
|
241 |
ut_ad(mutex_validate(mutex)); |
|
242 |
ut_ad(!mutex_own(mutex)); |
|
243 |
||
244 |
/* Note that we do not peek at the value of lock_word before trying |
|
245 |
the atomic test_and_set; we could peek, and possibly save time. */ |
|
246 |
||
247 |
#if defined UNIV_DEBUG && !defined UNIV_HOTBACKUP
|
|
248 |
mutex->count_using++; |
|
249 |
#endif /* UNIV_DEBUG && !UNIV_HOTBACKUP */
|
|
250 |
||
251 |
if (!mutex_test_and_set(mutex)) { |
|
252 |
ut_d(mutex->thread_id = os_thread_get_curr_id()); |
|
253 |
#ifdef UNIV_SYNC_DEBUG
|
|
254 |
mutex_set_debug_info(mutex, file_name, line); |
|
255 |
#endif
|
|
256 |
return; /* Succeeded! */ |
|
257 |
}
|
|
258 |
||
259 |
mutex_spin_wait(mutex, file_name, line); |
|
260 |
}
|