comparison of nspr/pr/src/threads/prcmon.c @ 0:1e5118fa0cb1 (against the null revision -1:000000000000)

This is NSS with a CMake buildsystem

To compile a static NSS library for Windows, we used the
Chromium NSS fork and added a CMake buildsystem to build
it statically. See README.chromium for the Chromium changes
and README.trustbridge for our modifications.
author    Andre Heinecke <andre.heinecke@intevation.de>
date      Mon, 28 Jul 2014 10:47:06 +0200
parents   (none)
children  (none)

/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "primpl.h"

#include <stdlib.h>
#include <stddef.h>

/* Lock used to lock the monitor cache */
#ifdef _PR_NO_PREEMPT
#define _PR_NEW_LOCK_MCACHE()
#define _PR_DESTROY_LOCK_MCACHE()
#define _PR_LOCK_MCACHE()
#define _PR_UNLOCK_MCACHE()
#else
#ifdef _PR_LOCAL_THREADS_ONLY
#define _PR_NEW_LOCK_MCACHE()
#define _PR_DESTROY_LOCK_MCACHE()
#define _PR_LOCK_MCACHE() { PRIntn _is; _PR_INTSOFF(_is)
#define _PR_UNLOCK_MCACHE() _PR_INTSON(_is); }
#else
PRLock *_pr_mcacheLock;
#define _PR_NEW_LOCK_MCACHE() (_pr_mcacheLock = PR_NewLock())
#define _PR_DESTROY_LOCK_MCACHE() \
    PR_BEGIN_MACRO \
        if (_pr_mcacheLock) { \
            PR_DestroyLock(_pr_mcacheLock); \
            _pr_mcacheLock = NULL; \
        } \
    PR_END_MACRO
#define _PR_LOCK_MCACHE() PR_Lock(_pr_mcacheLock)
#define _PR_UNLOCK_MCACHE() PR_Unlock(_pr_mcacheLock)
#endif
#endif

/************************************************************************/

typedef struct MonitorCacheEntryStr MonitorCacheEntry;

struct MonitorCacheEntryStr {
    MonitorCacheEntry* next;
    void* address;
    PRMonitor* mon;
    long cacheEntryCount;
};

/*
** An array of MonitorCacheEntry's, plus a pointer to link these
** arrays together.
*/

typedef struct MonitorCacheEntryBlockStr MonitorCacheEntryBlock;

struct MonitorCacheEntryBlockStr {
    MonitorCacheEntryBlock* next;
    MonitorCacheEntry entries[1];
};

static PRUint32 hash_mask;
static PRUintn num_hash_buckets;
static PRUintn num_hash_buckets_log2;
static MonitorCacheEntry **hash_buckets;
static MonitorCacheEntry *free_entries;
static PRUintn num_free_entries;
static PRBool expanding;
static MonitorCacheEntryBlock *mcache_blocks;

static void (*OnMonitorRecycle)(void *address);

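/*
** Map an address to a hash bucket index: drop the low two bits (callers
** typically pass pointers that are at least word aligned, so those bits
** carry little information), fold in higher-order bits with an xor, and
** mask the result down to the current table size.
*/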
#define HASH(address)                                  \
    ((PRUint32) ( ((PRUptrdiff)(address) >> 2) ^       \
                  ((PRUptrdiff)(address) >> 10) )      \
     & hash_mask)

/*
** Expand the monitor cache. This grows the hash buckets and allocates a
** new chunk of cache entries and throws them on the free list. We keep
** as many hash buckets as there are entries.
**
** Because we call malloc and malloc may need the monitor cache, we must
** ensure that there are several free monitor cache entries available for
** malloc to get. FREE_THRESHOLD is used to prevent monitor cache
** starvation during monitor cache expansion.
*/

#define FREE_THRESHOLD 5

static PRStatus ExpandMonitorCache(PRUintn new_size_log2)
{
    MonitorCacheEntry **old_hash_buckets, *p;
    PRUintn i, entries, old_num_hash_buckets, added;
    MonitorCacheEntry **new_hash_buckets;
    MonitorCacheEntryBlock *new_block;

    entries = 1L << new_size_log2;

    /*
    ** Expand the monitor-cache-entry free list
    */
    new_block = (MonitorCacheEntryBlock*)
        PR_CALLOC(sizeof(MonitorCacheEntryBlock)
                  + (entries - 1) * sizeof(MonitorCacheEntry));
    if (NULL == new_block) return PR_FAILURE;

    /*
    ** Allocate system monitors for the new monitor cache entries. If we
    ** run out of system monitors, break out of the loop.
    */
    for (i = 0, p = new_block->entries; i < entries; i++, p++) {
        p->mon = PR_NewMonitor();
        if (!p->mon)
            break;
    }
    added = i;
    if (added != entries) {
        MonitorCacheEntryBlock *realloc_block;

        if (added == 0) {
            /* Totally out of system monitors. Lossage abounds */
            PR_DELETE(new_block);
            return PR_FAILURE;
        }

        /*
        ** We were able to allocate some of the system monitors. Use
        ** realloc to shrink down the new_block memory. If that fails,
        ** carry on with the too-large new_block.
        */
        realloc_block = (MonitorCacheEntryBlock*)
            PR_REALLOC(new_block, sizeof(MonitorCacheEntryBlock)
                       + (added - 1) * sizeof(MonitorCacheEntry));
        if (realloc_block)
            new_block = realloc_block;
    }

    /*
    ** Now that we have allocated all of the system monitors, build up
    ** the new free list. We can just update the free_list because we own
    ** the mcache-lock and we aren't calling anyone who might want to use
    ** it.
    */
    for (i = 0, p = new_block->entries; i < added - 1; i++, p++)
        p->next = p + 1;
    p->next = free_entries;
    free_entries = new_block->entries;
    num_free_entries += added;
    new_block->next = mcache_blocks;
    mcache_blocks = new_block;

    /* Try to expand the hash table */
    new_hash_buckets = (MonitorCacheEntry**)
        PR_CALLOC(entries * sizeof(MonitorCacheEntry*));
    if (NULL == new_hash_buckets) {
        /*
        ** Partial lossage. In this situation we don't get any more hash
        ** buckets, which just means that the table lookups will take
        ** longer. This is bad, but not fatal
        */
        PR_LOG(_pr_cmon_lm, PR_LOG_WARNING,
               ("unable to grow monitor cache hash buckets"));
        return PR_SUCCESS;
    }

    /*
    ** Compute the new hash mask value. It is used to mask the shifted
    ** address bits down to a valid index into the hash table.
    */
    hash_mask = entries - 1;

    /*
    ** Expand the hash table. We have to rehash everything in the old
    ** table into the new table.
    */
    old_hash_buckets = hash_buckets;
    old_num_hash_buckets = num_hash_buckets;
    for (i = 0; i < old_num_hash_buckets; i++) {
        p = old_hash_buckets[i];
        while (p) {
            MonitorCacheEntry *next = p->next;

            /* Hash based on new table size, and then put p in the new table */
            PRUintn hash = HASH(p->address);
            p->next = new_hash_buckets[hash];
            new_hash_buckets[hash] = p;

            p = next;
        }
    }

    /*
    ** Switch over to new hash table and THEN call free of the old
    ** table. Since free might re-enter _pr_mcache_lock, things would
    ** break terribly if it used the old hash table.
    */
    hash_buckets = new_hash_buckets;
    num_hash_buckets = entries;
    num_hash_buckets_log2 = new_size_log2;
    PR_DELETE(old_hash_buckets);

    PR_LOG(_pr_cmon_lm, PR_LOG_NOTICE,
           ("expanded monitor cache to %d (buckets %d)",
            num_free_entries, entries));

    return PR_SUCCESS;
}  /* ExpandMonitorCache */

/*
** Lookup a monitor cache entry by address. Return a pointer to the
** pointer to the monitor cache entry on success, null on failure.
*/
static MonitorCacheEntry **LookupMonitorCacheEntry(void *address)
{
    PRUintn hash;
    MonitorCacheEntry **pp, *p;

    hash = HASH(address);
    pp = hash_buckets + hash;
    while ((p = *pp) != 0) {
        if (p->address == address) {
            if (p->cacheEntryCount > 0)
                return pp;
            return NULL;
        }
        pp = &p->next;
    }
    return NULL;
}

/*
** Try to create a new cached monitor. If it's already in the cache,
** great - return it. Otherwise get a new free cache entry and set it
** up. If the cache free space is getting low, expand the cache.
*/
static PRMonitor *CreateMonitor(void *address)
{
    PRUintn hash;
    MonitorCacheEntry **pp, *p;

    hash = HASH(address);
    pp = hash_buckets + hash;
    while ((p = *pp) != 0) {
        if (p->address == address) goto gotit;

        pp = &p->next;
    }

    /* Expand the monitor cache if we have run out of free slots in the table */
    if (num_free_entries < FREE_THRESHOLD) {
        /* Expand monitor cache */

        /*
        ** This function is called with the lock held. So what's the 'expanding'
        ** boolean all about? Seems a bit redundant.
        */
        if (!expanding) {
            PRStatus rv;

            expanding = PR_TRUE;
            rv = ExpandMonitorCache(num_hash_buckets_log2 + 1);
            expanding = PR_FALSE;
            if (PR_FAILURE == rv) return NULL;

            /* redo the hash because it'll be different now */
            hash = HASH(address);
        } else {
            /*
            ** We are in the process of expanding and we need a cache
            ** monitor. Make sure we have enough!
            */
            PR_ASSERT(num_free_entries > 0);
        }
    }

    /* Make a new monitor */
    p = free_entries;
    free_entries = p->next;
    num_free_entries--;
    if (OnMonitorRecycle && p->address)
        OnMonitorRecycle(p->address);
    p->address = address;
    p->next = hash_buckets[hash];
    hash_buckets[hash] = p;
    PR_ASSERT(p->cacheEntryCount == 0);

  gotit:
    p->cacheEntryCount++;
    return p->mon;
}

/*
** Initialize the monitor cache
*/
void _PR_InitCMon(void)
{
    _PR_NEW_LOCK_MCACHE();
    ExpandMonitorCache(3);
}

/*
** Destroy the monitor cache
*/
void _PR_CleanupCMon(void)
{
    _PR_DESTROY_LOCK_MCACHE();

    while (free_entries) {
        PR_DestroyMonitor(free_entries->mon);
        free_entries = free_entries->next;
    }
    num_free_entries = 0;

    while (mcache_blocks) {
        MonitorCacheEntryBlock *block;

        block = mcache_blocks;
        mcache_blocks = block->next;
        PR_DELETE(block);
    }

    PR_DELETE(hash_buckets);
    hash_mask = 0;
    num_hash_buckets = 0;
    num_hash_buckets_log2 = 0;

    expanding = PR_FALSE;
    OnMonitorRecycle = NULL;
}

/*
** Create monitor for address. Don't enter the monitor while we have the
** mcache locked because we might block!
*/
PR_IMPLEMENT(PRMonitor*) PR_CEnterMonitor(void *address)
{
    PRMonitor *mon;

    if (!_pr_initialized) _PR_ImplicitInitialization();

    _PR_LOCK_MCACHE();
    mon = CreateMonitor(address);
    _PR_UNLOCK_MCACHE();

    if (!mon) return NULL;

    PR_EnterMonitor(mon);
    return mon;
}

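/*
** Exit the cached monitor associated with address. When the entry's use
** count drops to zero, the cache entry is unlinked from its hash bucket
** and returned to the free list. Fails if no monitor is currently cached
** for the address.
*/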
PR_IMPLEMENT(PRStatus) PR_CExitMonitor(void *address)
{
    MonitorCacheEntry **pp, *p;
    PRStatus status = PR_SUCCESS;

    _PR_LOCK_MCACHE();
    pp = LookupMonitorCacheEntry(address);
    if (pp != NULL) {
        p = *pp;
        if (--p->cacheEntryCount == 0) {
            /*
            ** Nobody is using the system monitor. Put it on the cached free
            ** list. We are safe from somebody trying to use it because we
            ** have the mcache locked.
            */
            p->address = 0;             /* defensive move */
            *pp = p->next;              /* unlink from hash_buckets */
            p->next = free_entries;     /* link into free list */
            free_entries = p;
            num_free_entries++;         /* count it as free */
        }
        status = PR_ExitMonitor(p->mon);
    } else {
        status = PR_FAILURE;
    }
    _PR_UNLOCK_MCACHE();

    return status;
}

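/*
** Wait on the cached monitor associated with address for at most 'ticks'.
** Fails if no monitor is currently cached for the address.
*/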
PR_IMPLEMENT(PRStatus) PR_CWait(void *address, PRIntervalTime ticks)
{
    MonitorCacheEntry **pp;
    PRMonitor *mon;

    _PR_LOCK_MCACHE();
    pp = LookupMonitorCacheEntry(address);
    mon = pp ? ((*pp)->mon) : NULL;
    _PR_UNLOCK_MCACHE();

    if (mon == NULL)
        return PR_FAILURE;
    return PR_Wait(mon, ticks);
}

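/*
** Notify one thread waiting on the cached monitor associated with
** address. Fails if no monitor is currently cached for the address.
*/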
PR_IMPLEMENT(PRStatus) PR_CNotify(void *address)
{
    MonitorCacheEntry **pp;
    PRMonitor *mon;

    _PR_LOCK_MCACHE();
    pp = LookupMonitorCacheEntry(address);
    mon = pp ? ((*pp)->mon) : NULL;
    _PR_UNLOCK_MCACHE();

    if (mon == NULL)
        return PR_FAILURE;
    return PR_Notify(mon);
}

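/*
** Notify all threads waiting on the cached monitor associated with
** address. Fails if no monitor is currently cached for the address.
*/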
PR_IMPLEMENT(PRStatus) PR_CNotifyAll(void *address)
{
    MonitorCacheEntry **pp;
    PRMonitor *mon;

    _PR_LOCK_MCACHE();
    pp = LookupMonitorCacheEntry(address);
    mon = pp ? ((*pp)->mon) : NULL;
    _PR_UNLOCK_MCACHE();

    if (mon == NULL)
        return PR_FAILURE;
    return PR_NotifyAll(mon);
}

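/*
** Register a callback that CreateMonitor invokes with the old address
** when it reuses a cache entry that still has an address recorded.
*/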
PR_IMPLEMENT(void)
PR_CSetOnMonitorRecycle(void (*callback)(void *address))
{
    OnMonitorRecycle = callback;
}
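
The file above implements the NSPR cached-monitor API: a PRMonitor is looked up by an arbitrary address, so callers never allocate or store monitor objects themselves. What follows is a minimal usage sketch, not part of prcmon.c; the shared flag structure and the waiter/notifier split are invented for illustration, and only the PR_C* entry points defined above plus the standard NSPR thread functions (PR_CreateThread, PR_JoinThread) from the nspr.h umbrella header are assumed.

#include "nspr.h"

/* Any stable address can key a cached monitor; here it is a shared flag. */
static struct {
    int ready;
} shared;

static void waiter(void *arg)
{
    (void)arg;                                      /* unused */
    PR_CEnterMonitor(&shared);                      /* creates the cached monitor on first use */
    while (!shared.ready)                           /* re-check the condition after every wakeup */
        PR_CWait(&shared, PR_INTERVAL_NO_TIMEOUT);
    PR_CExitMonitor(&shared);
}

int main(void)
{
    PRThread *t = PR_CreateThread(PR_USER_THREAD, waiter, NULL,
                                  PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
                                  PR_JOINABLE_THREAD, 0);

    PR_CEnterMonitor(&shared);
    shared.ready = 1;
    PR_CNotify(&shared);                            /* wake one thread waiting on &shared */
    PR_CExitMonitor(&shared);

    PR_JoinThread(t);
    return 0;
}

Note that PR_CWait and PR_CNotify expect the calling thread to have entered the cached monitor for that address first; LookupMonitorCacheEntry only finds entries whose cacheEntryCount is positive, i.e. monitors that are currently entered.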