Mercurial > trustbridge > nss-cmake-static
comparison nspr/pr/src/md/windows/w95cv.c @ 0:1e5118fa0cb1
This is NSS with a CMake buildsystem.
To compile a static NSS library for Windows we've used the
Chromium-NSS fork and added a Cmake buildsystem to compile
it statically for Windows. See README.chromium for chromium
changes and README.trustbridge for our modifications.
author | Andre Heinecke <andre.heinecke@intevation.de> |
---|---|
date | Mon, 28 Jul 2014 10:47:06 +0200 |
parents | |
children |
comparison
equal
deleted
inserted
replaced
-1:000000000000 | 0:1e5118fa0cb1 |
---|---|
1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ | |
2 /* This Source Code Form is subject to the terms of the Mozilla Public | |
3 * License, v. 2.0. If a copy of the MPL was not distributed with this | |
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ | |
5 | |
6 /* | |
7 * w95cv.c -- Windows 95 Machine-Dependent Code for Condition Variables | |
8 * | |
9 * We implement our own condition variable wait queue. Each thread | |
10 * has a semaphore object (thread->md.blocked_sema) to block on while | |
11 * waiting on a condition variable. | |
12 * | |
13 * We use a deferred condition notify algorithm. When PR_NotifyCondVar | |
14 * or PR_NotifyAllCondVar is called, the condition notifies are simply | |
15 * recorded in the _MDLock structure. We defer the condition notifies | |
16 * until right after we unlock the lock. This way the awakened threads | |
17 * have a better chance to reacquire the lock. | |
18 */ | |
19 | |
20 #include "primpl.h" | |
21 | |
22 /* | |
23 * AddThreadToCVWaitQueueInternal -- | |
24 * | |
25 * Add the thread to the end of the condition variable's wait queue. | |
26 * The CV's lock must be locked when this function is called. | |
27 */ | |
28 | |
29 static void | |
30 AddThreadToCVWaitQueueInternal(PRThread *thred, struct _MDCVar *cv) | |
31 { | |
32 PR_ASSERT((cv->waitTail != NULL && cv->waitHead != NULL) | |
33 || (cv->waitTail == NULL && cv->waitHead == NULL)); | |
34 cv->nwait += 1; | |
35 thred->md.inCVWaitQueue = PR_TRUE; | |
36 thred->md.next = NULL; | |
37 thred->md.prev = cv->waitTail; | |
38 if (cv->waitHead == NULL) { | |
39 cv->waitHead = thred; | |
40 } else { | |
41 cv->waitTail->md.next = thred; | |
42 } | |
43 cv->waitTail = thred; | |
44 } | |
45 | |
/*
 * md_UnlockAndPostNotifies --
 *
 *    Unlock the lock, and then do the deferred condition notifies.
 *    If waitThred and waitCV are not NULL, waitThred is added to
 *    the wait queue of waitCV before the lock is unlocked.
 *
 *    This function is called by _PR_MD_WAIT_CV and _PR_MD_UNLOCK,
 *    the two places where a lock is unlocked.
 */
static void
md_UnlockAndPostNotifies(
    _MDLock *lock,
    PRThread *waitThred,
    _MDCVar *waitCV)
{
    PRIntn index;
    _MDNotified post;       /* stack copy of the deferred-notify list head */
    _MDNotified *notified, *prev = NULL;

    /*
     * Time to actually notify any conditions that were affected
     * while the lock was held.  Get a copy of the list that's in
     * the lock structure and then zero the original.  If it's
     * linked to other such structures, we own that storage.
     */
    post = lock->notified; /* a safe copy; we own the lock */

#if defined(DEBUG)
    ZeroMemory(&lock->notified, sizeof(_MDNotified)); /* reset */
#else
    lock->notified.length = 0; /* these are really sufficient */
    lock->notified.link = NULL;
#endif

    /*
     * Pass 1 (still holding the lock): for each notified CV, detach the
     * threads to be awakened from the CV's wait queue and remember the
     * head of each detached segment in notifyHead.  No semaphores are
     * released yet.
     */
    notified = &post; /* this is where we start */
    do {
        for (index = 0; index < notified->length; ++index) {
            _MDCVar *cv = notified->cv[index].cv;
            PRThread *thred;
            int i;

            /* Fast special case: no waiting threads */
            if (cv->waitHead == NULL) {
                notified->cv[index].notifyHead = NULL;
                continue;
            }

            /* General case */
            if (-1 == notified->cv[index].times) {
                /* broadcast: detach the entire wait queue */
                thred = cv->waitHead;
                while (thred != NULL) {
                    thred->md.inCVWaitQueue = PR_FALSE;
                    thred = thred->md.next;
                }
                notified->cv[index].notifyHead = cv->waitHead;
                cv->waitHead = cv->waitTail = NULL;
                cv->nwait = 0;
            } else {
                /* notify: detach up to 'times' threads from the front */
                thred = cv->waitHead;
                i = notified->cv[index].times;
                while (thred != NULL && i > 0) {
                    thred->md.inCVWaitQueue = PR_FALSE;
                    thred = thred->md.next;
                    i--;
                }
                notified->cv[index].notifyHead = cv->waitHead;
                cv->waitHead = thred;   /* first thread left waiting (or NULL) */
                if (cv->waitHead == NULL) {
                    cv->waitTail = NULL;
                } else {
                    /* sever the link between detached and remaining threads */
                    if (cv->waitHead->md.prev != NULL) {
                        cv->waitHead->md.prev->md.next = NULL;
                        cv->waitHead->md.prev = NULL;
                    }
                }
                /* (times - i) is the number actually detached */
                cv->nwait -= notified->cv[index].times - i;
            }
        }
        notified = notified->link;
    } while (NULL != notified);

    /* Enqueue the caller on waitCV while we still hold the lock. */
    if (waitThred) {
        AddThreadToCVWaitQueueInternal(waitThred, waitCV);
    }

    /* Release the lock before notifying */
    LeaveCriticalSection(&lock->mutex);

    /*
     * Pass 2 (lock released): wake each detached thread by releasing its
     * per-thread semaphore, and free any overflow _MDNotified structures
     * (all but the stack-resident head 'post').
     */
    notified = &post; /* this is where we start */
    do {
        for (index = 0; index < notified->length; ++index) {
            PRThread *thred;
            PRThread *next;

            thred = notified->cv[index].notifyHead;
            while (thred != NULL) {
                BOOL rv;

                /* capture next before unlinking this node */
                next = thred->md.next;
                thred->md.prev = thred->md.next = NULL;

                rv = ReleaseSemaphore(thred->md.blocked_sema, 1, NULL);
                PR_ASSERT(rv != 0);
                thred = next;
            }
        }
        prev = notified;
        notified = notified->link;
        /* the head lives on our stack; only linked overflow nodes are heap */
        if (&post != prev) PR_DELETE(prev);
    } while (NULL != notified);
}
162 | |
/*
 * Notifies just get posted to the protecting mutex.  The
 * actual notification is done when the lock is released so that
 * MP systems don't contend for a lock that they can't have.
 */
static void md_PostNotifyToCvar(_MDCVar *cvar, _MDLock *lock,
    PRBool broadcast)
{
    PRIntn index = 0;
    _MDNotified *notified = &lock->notified;

    /*
     * Walk the chain of _MDNotified arrays looking for an existing
     * entry for this CV; if found, just upgrade its count.
     */
    while (1) {
        for (index = 0; index < notified->length; ++index) {
            if (notified->cv[index].cv == cvar) {
                if (broadcast) {
                    /* -1 means "wake everyone"; it absorbs any count */
                    notified->cv[index].times = -1;
                } else if (-1 != notified->cv[index].times) {
                    notified->cv[index].times += 1;
                }
                return;
            }
        }
        /* if not full, enter new CV in this array */
        if (notified->length < _MD_CV_NOTIFIED_LENGTH) break;

        /* if there's no link, create an empty array and link it */
        if (NULL == notified->link) {
            /*
             * NOTE(review): PR_NEWZAP can presumably return NULL on OOM,
             * which would crash the dereference below -- confirm whether
             * callers guarantee allocation succeeds here.
             */
            notified->link = PR_NEWZAP(_MDNotified);
        }

        notified = notified->link;
    }

    /* A brand new entry in the array */
    notified->cv[index].times = (broadcast) ? -1 : 1;
    notified->cv[index].cv = cvar;
    notified->length += 1;
}
201 | |
202 /* | |
203 * _PR_MD_NEW_CV() -- Creating new condition variable | |
204 * ... Solaris uses cond_init() in similar function. | |
205 * | |
206 * returns: -1 on failure | |
207 * 0 when it succeeds. | |
208 * | |
209 */ | |
210 PRInt32 | |
211 _PR_MD_NEW_CV(_MDCVar *cv) | |
212 { | |
213 cv->magic = _MD_MAGIC_CV; | |
214 /* | |
215 * The waitHead, waitTail, and nwait fields are zeroed | |
216 * when the PRCondVar structure is created. | |
217 */ | |
218 return 0; | |
219 } | |
220 | |
221 void _PR_MD_FREE_CV(_MDCVar *cv) | |
222 { | |
223 cv->magic = (PRUint32)-1; | |
224 return; | |
225 } | |
226 | |
/*
 * _PR_MD_WAIT_CV() -- Wait on condition variable
 *
 *    Atomically releases 'lock' and blocks the calling thread on its
 *    per-thread semaphore until notified or until 'timeout' elapses,
 *    then re-acquires the lock before returning.
 */
void _PR_MD_WAIT_CV(_MDCVar *cv, _MDLock *lock, PRIntervalTime timeout )
{
    PRThread *thred = _PR_MD_CURRENT_THREAD();
    DWORD rv;
    DWORD msecs = (timeout == PR_INTERVAL_NO_TIMEOUT) ?
        INFINITE : PR_IntervalToMilliseconds(timeout);

    /*
     * If we have pending notifies, post them now.
     * (md_UnlockAndPostNotifies both enqueues us on the CV and
     * releases the critical section.)
     */
    if (0 != lock->notified.length) {
        md_UnlockAndPostNotifies(lock, thred, cv);
    } else {
        AddThreadToCVWaitQueueInternal(thred, cv);
        LeaveCriticalSection(&lock->mutex);
    }

    /* Wait for notification or timeout; don't really care which */
    rv = WaitForSingleObject(thred->md.blocked_sema, msecs);

    EnterCriticalSection(&(lock->mutex));

    PR_ASSERT(rv != WAIT_ABANDONED);
    PR_ASSERT(rv != WAIT_FAILED);
    /* if the semaphore was signaled, a notifier already dequeued us */
    PR_ASSERT(rv != WAIT_OBJECT_0 || thred->md.inCVWaitQueue == PR_FALSE);

    if (rv == WAIT_TIMEOUT) {
        if (thred->md.inCVWaitQueue) {
            /* true timeout: remove ourselves from the CV's wait queue */
            PR_ASSERT((cv->waitTail != NULL && cv->waitHead != NULL)
                || (cv->waitTail == NULL && cv->waitHead == NULL));
            cv->nwait -= 1;
            thred->md.inCVWaitQueue = PR_FALSE;
            if (cv->waitHead == thred) {
                /* we were at the head of the queue */
                cv->waitHead = thred->md.next;
                if (cv->waitHead == NULL) {
                    cv->waitTail = NULL;
                } else {
                    cv->waitHead->md.prev = NULL;
                }
            } else {
                /* unlink from the middle or tail of the queue */
                PR_ASSERT(thred->md.prev != NULL);
                thred->md.prev->md.next = thred->md.next;
                if (thred->md.next != NULL) {
                    thred->md.next->md.prev = thred->md.prev;
                } else {
                    PR_ASSERT(cv->waitTail == thred);
                    cv->waitTail = thred->md.prev;
                }
            }
            thred->md.next = thred->md.prev = NULL;
        } else {
            /*
             * This thread must have been notified, but the
             * ReleaseSemaphore call happens after WaitForSingleObject
             * times out.  Wait on the semaphore again to make it
             * non-signaled.  We assume this wait won't take long.
             */
            rv = WaitForSingleObject(thred->md.blocked_sema, INFINITE);
            PR_ASSERT(rv == WAIT_OBJECT_0);
        }
    }
    PR_ASSERT(thred->md.inCVWaitQueue == PR_FALSE);
    return;
} /* --- end _PR_MD_WAIT_CV() --- */
294 | |
295 void _PR_MD_NOTIFY_CV(_MDCVar *cv, _MDLock *lock) | |
296 { | |
297 md_PostNotifyToCvar(cv, lock, PR_FALSE); | |
298 return; | |
299 } | |
300 | |
301 void _PR_MD_NOTIFYALL_CV(_MDCVar *cv, _MDLock *lock) | |
302 { | |
303 md_PostNotifyToCvar(cv, lock, PR_TRUE); | |
304 return; | |
305 } | |
306 | |
/* Signature of kernel32's InitializeCriticalSectionEx (available Vista+). */
typedef BOOL (WINAPI *INITIALIZECRITICALSECTIONEX)(
    CRITICAL_SECTION *lpCriticalSection,
    DWORD dwSpinCount,
    DWORD Flags);

/* Resolved at runtime by _PR_MD_INIT_LOCKS; stays NULL on older systems. */
static INITIALIZECRITICALSECTIONEX sInitializeCriticalSectionEx;
313 | |
314 void _PR_MD_INIT_LOCKS(void) | |
315 { | |
316 /* | |
317 * Starting with Windows Vista, every CRITICAL_SECTION allocates an extra | |
318 * RTL_CRITICAL_SECTION_DEBUG object. Unfortunately, this debug object is | |
319 * not reclaimed by DeleteCriticalSection(), causing an apparent memory | |
320 * leak. This is a debugging "feature", not a bug. If we are running on | |
321 * Vista or later, use InitializeCriticalSectionEx() to allocate | |
322 * CRITICAL_SECTIONs without debug objects. | |
323 */ | |
324 HMODULE hKernel32 = GetModuleHandle("kernel32.dll"); | |
325 PR_ASSERT(hKernel32); | |
326 PR_ASSERT(!sInitializeCriticalSectionEx); | |
327 sInitializeCriticalSectionEx = (INITIALIZECRITICALSECTIONEX) | |
328 GetProcAddress(hKernel32, "InitializeCriticalSectionEx"); | |
329 } | |
330 | |
331 /* | |
332 * By default, CRITICAL_SECTIONs are initialized with a spin count of 0. | |
333 * Joe Duffy's "Concurrent Programming on Windows" book suggests 1500 is | |
334 * a "reasonable starting point". On single-processor systems, the spin | |
335 * count is ignored and the critical section spin count is set to 0. | |
336 */ | |
337 #define LOCK_SPIN_COUNT 1500 | |
338 | |
339 PRStatus _PR_MD_NEW_LOCK(_MDLock *lock) | |
340 { | |
341 CRITICAL_SECTION *cs = &lock->mutex; | |
342 BOOL ok; | |
343 | |
344 if (sInitializeCriticalSectionEx) { | |
345 ok = sInitializeCriticalSectionEx(cs, LOCK_SPIN_COUNT, | |
346 CRITICAL_SECTION_NO_DEBUG_INFO); | |
347 } else { | |
348 ok = InitializeCriticalSectionAndSpinCount(cs, LOCK_SPIN_COUNT); | |
349 } | |
350 if (!ok) { | |
351 _PR_MD_MAP_DEFAULT_ERROR(GetLastError()); | |
352 return PR_FAILURE; | |
353 } | |
354 | |
355 lock->notified.length = 0; | |
356 lock->notified.link = NULL; | |
357 return PR_SUCCESS; | |
358 } | |
359 | |
360 void _PR_MD_UNLOCK(_MDLock *lock) | |
361 { | |
362 if (0 != lock->notified.length) { | |
363 md_UnlockAndPostNotifies(lock, NULL, NULL); | |
364 } else { | |
365 LeaveCriticalSection(&lock->mutex); | |
366 } | |
367 } |