nspr/pr/src/threads/combined/pruthr.c @ 0:1e5118fa0cb1
This is NSS with a CMake buildsystem.

To compile a static NSS library for Windows we've used the
Chromium-NSS fork and added a CMake buildsystem to compile
it statically for Windows. See README.chromium for the Chromium
changes and README.trustbridge for our modifications.
author  Andre Heinecke <andre.heinecke@intevation.de>
date    Mon, 28 Jul 2014 10:47:06 +0200
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "primpl.h"
#include <signal.h>
#include <string.h>

#if defined(WIN95)
/*
** Some local variables report warnings on Win95 because the code paths
** using them are conditioned on HAVE_CUSTOM_USER_THREADS.
** The pragma suppresses the warning.
*/
#pragma warning(disable : 4101)
#endif

/* _pr_activeLock protects the following global variables */
PRLock *_pr_activeLock;
PRInt32 _pr_primordialExitCount;   /* In PR_Cleanup(), the primordial thread
                                    * waits until all other user (non-system)
                                    * threads have terminated before it exits.
                                    * So whenever we decrement _pr_userActive,
                                    * it is compared with
                                    * _pr_primordialExitCount.
                                    * If the primordial thread is a system
                                    * thread, then _pr_primordialExitCount
                                    * is 0. If the primordial thread is
                                    * itself a user thread, then
                                    * _pr_primordialExitCount is 1.
                                    */
PRCondVar *_pr_primordialExitCVar; /* When _pr_userActive is decremented to
                                    * _pr_primordialExitCount, this condition
                                    * variable is notified.
                                    */
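
#if 0
/* Illustrative sketch (editor's note, not code from this file): the exit
 * protocol the two globals above exist to support. PR_Cleanup(), which
 * lives elsewhere in NSPR, is expected to wait along these lines before
 * the primordial thread exits; the exact code is an assumption here. */
PR_Lock(_pr_activeLock);
while (_pr_userActive > _pr_primordialExitCount) {
    PR_WaitCondVar(_pr_primordialExitCVar, PR_INTERVAL_NO_TIMEOUT);
}
PR_Unlock(_pr_activeLock);
#endif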

PRLock *_pr_deadQLock;
PRUint32 _pr_numNativeDead;
PRUint32 _pr_numUserDead;
PRCList _pr_deadNativeQ;
PRCList _pr_deadUserQ;

PRUint32 _pr_join_counter;

PRUint32 _pr_local_threads;
PRUint32 _pr_global_threads;

PRBool suspendAllOn = PR_FALSE;
PRThread *suspendAllThread = NULL;

extern PRCList _pr_active_global_threadQ;
extern PRCList _pr_active_local_threadQ;

static void _PR_DecrActiveThreadCount(PRThread *thread);
static PRThread *_PR_AttachThread(PRThreadType, PRThreadPriority, PRThreadStack *);
static void _PR_InitializeNativeStack(PRThreadStack *ts);
static void _PR_InitializeRecycledThread(PRThread *thread);
static void _PR_UserRunThread(void);

void _PR_InitThreads(PRThreadType type, PRThreadPriority priority,
                     PRUintn maxPTDs)
{
    PRThread *thread;
    PRThreadStack *stack;

    PR_ASSERT(priority == PR_PRIORITY_NORMAL);

    _pr_terminationCVLock = PR_NewLock();
    _pr_activeLock = PR_NewLock();

#ifndef HAVE_CUSTOM_USER_THREADS
    stack = PR_NEWZAP(PRThreadStack);
#ifdef HAVE_STACK_GROWING_UP
    stack->stackTop = (char*) ((((long)&type) >> _pr_pageShift)
                               << _pr_pageShift);
#else
#if defined(SOLARIS) || defined (UNIXWARE) && defined (USR_SVR4_THREADS)
    stack->stackTop = (char*) &thread;
#else
    stack->stackTop = (char*) ((((long)&type + _pr_pageSize - 1)
                                >> _pr_pageShift) << _pr_pageShift);
#endif
#endif
#else
    /* If stack is NULL, we're using custom user threads like NT fibers. */
    stack = PR_NEWZAP(PRThreadStack);
    if (stack) {
        stack->stackSize = 0;
        _PR_InitializeNativeStack(stack);
    }
#endif /* HAVE_CUSTOM_USER_THREADS */

    thread = _PR_AttachThread(type, priority, stack);
    if (thread) {
        _PR_MD_SET_CURRENT_THREAD(thread);

        if (type == PR_SYSTEM_THREAD) {
            thread->flags = _PR_SYSTEM;
            _pr_systemActive++;
            _pr_primordialExitCount = 0;
        } else {
            _pr_userActive++;
            _pr_primordialExitCount = 1;
        }
        thread->no_sched = 1;
        _pr_primordialExitCVar = PR_NewCondVar(_pr_activeLock);
    }

    if (!thread) PR_Abort();
#ifdef _PR_LOCAL_THREADS_ONLY
    thread->flags |= _PR_PRIMORDIAL;
#else
    thread->flags |= _PR_PRIMORDIAL | _PR_GLOBAL_SCOPE;
#endif

    /*
     * Needs _PR_PRIMORDIAL flag set before calling
     * _PR_MD_INIT_THREAD()
     */
    if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
        /*
         * XXX do what?
         */
    }

    if (_PR_IS_NATIVE_THREAD(thread)) {
        PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_GLOBAL_THREADQ());
        _pr_global_threads++;
    } else {
        PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_LOCAL_THREADQ());
        _pr_local_threads++;
    }

    _pr_recycleThreads = 0;
    _pr_deadQLock = PR_NewLock();
    _pr_numNativeDead = 0;
    _pr_numUserDead = 0;
    PR_INIT_CLIST(&_pr_deadNativeQ);
    PR_INIT_CLIST(&_pr_deadUserQ);
}

void _PR_CleanupThreads(void)
{
    if (_pr_terminationCVLock) {
        PR_DestroyLock(_pr_terminationCVLock);
        _pr_terminationCVLock = NULL;
    }
    if (_pr_activeLock) {
        PR_DestroyLock(_pr_activeLock);
        _pr_activeLock = NULL;
    }
    if (_pr_primordialExitCVar) {
        PR_DestroyCondVar(_pr_primordialExitCVar);
        _pr_primordialExitCVar = NULL;
    }
    /* TODO _pr_dead{Native,User}Q need to be deleted */
    if (_pr_deadQLock) {
        PR_DestroyLock(_pr_deadQLock);
        _pr_deadQLock = NULL;
    }
}

/*
** Initialize a stack for a native thread
*/
static void _PR_InitializeNativeStack(PRThreadStack *ts)
{
    if( ts && (ts->stackTop == 0) ) {
        ts->allocSize = ts->stackSize;

        /*
        ** Setup stackTop and stackBottom values.
        */
#ifdef HAVE_STACK_GROWING_UP
        ts->allocBase = (char*) ((((long)&ts) >> _pr_pageShift)
                                 << _pr_pageShift);
        ts->stackBottom = ts->allocBase + ts->stackSize;
        ts->stackTop = ts->allocBase;
#else
        ts->allocBase = (char*) ((((long)&ts + _pr_pageSize - 1)
                                  >> _pr_pageShift) << _pr_pageShift);
        ts->stackTop = ts->allocBase;
        ts->stackBottom = ts->allocBase - ts->stackSize;
#endif
    }
}

void _PR_NotifyJoinWaiters(PRThread *thread)
{
    /*
    ** Handle joinable threads. Change the state to waiting for join.
    ** Remove from our run Q and put it on global waiting to join Q.
    ** Notify on our "termination" condition variable so that joining
    ** thread will know about our termination. Switch our context and
    ** come back later on to continue the cleanup.
    */
    PR_ASSERT(thread == _PR_MD_CURRENT_THREAD());
    if (thread->term != NULL) {
        PR_Lock(_pr_terminationCVLock);
        _PR_THREAD_LOCK(thread);
        thread->state = _PR_JOIN_WAIT;
        if ( !_PR_IS_NATIVE_THREAD(thread) ) {
            _PR_MISCQ_LOCK(thread->cpu);
            _PR_ADD_JOINQ(thread, thread->cpu);
            _PR_MISCQ_UNLOCK(thread->cpu);
        }
        _PR_THREAD_UNLOCK(thread);
        PR_NotifyCondVar(thread->term);
        PR_Unlock(_pr_terminationCVLock);
        _PR_MD_WAIT(thread, PR_INTERVAL_NO_TIMEOUT);
        PR_ASSERT(thread->state != _PR_JOIN_WAIT);
    }
}

/*
 * Zero some of the data members of a recycled thread.
 *
 * Note that we can do this either when a dead thread is added to
 * the dead thread queue or when it is reused. Here, we are doing
 * this lazily, when the thread is reused in _PR_CreateThread().
 */
static void _PR_InitializeRecycledThread(PRThread *thread)
{
    /*
     * Assert that the following data members are already zeroed
     * by _PR_CleanupThread().
     */
#ifdef DEBUG
    if (thread->privateData) {
        unsigned int i;
        for (i = 0; i < thread->tpdLength; i++) {
            PR_ASSERT(thread->privateData[i] == NULL);
        }
    }
#endif
    PR_ASSERT(thread->dumpArg == 0 && thread->dump == 0);
    PR_ASSERT(thread->errorString == 0 && thread->errorStringSize == 0);
    PR_ASSERT(thread->errorStringLength == 0);
    PR_ASSERT(thread->name == 0);

    /* Reset data members in thread structure */
    thread->errorCode = thread->osErrorCode = 0;
    thread->io_pending = thread->io_suspended = PR_FALSE;
    thread->environment = 0;
    PR_INIT_CLIST(&thread->lockList);
}

PRStatus _PR_RecycleThread(PRThread *thread)
{
    if ( _PR_IS_NATIVE_THREAD(thread) &&
         _PR_NUM_DEADNATIVE < _pr_recycleThreads) {
        _PR_DEADQ_LOCK;
        PR_APPEND_LINK(&thread->links, &_PR_DEADNATIVEQ);
        _PR_INC_DEADNATIVE;
        _PR_DEADQ_UNLOCK;
        return (PR_SUCCESS);
    } else if ( !_PR_IS_NATIVE_THREAD(thread) &&
                _PR_NUM_DEADUSER < _pr_recycleThreads) {
        _PR_DEADQ_LOCK;
        PR_APPEND_LINK(&thread->links, &_PR_DEADUSERQ);
        _PR_INC_DEADUSER;
        _PR_DEADQ_UNLOCK;
        return (PR_SUCCESS);
    }
    return (PR_FAILURE);
}

/*
 * Decrement the active thread count, either _pr_systemActive or
 * _pr_userActive, depending on whether the thread is a system thread
 * or a user thread. If all the user threads, except possibly
 * the primordial thread, have terminated, we notify the primordial
 * thread of this condition.
 *
 * Since this function will lock _pr_activeLock, do not call this
 * function while holding the _pr_activeLock lock, as this will result
 * in a deadlock.
 */

static void
_PR_DecrActiveThreadCount(PRThread *thread)
{
    PR_Lock(_pr_activeLock);
    if (thread->flags & _PR_SYSTEM) {
        _pr_systemActive--;
    } else {
        _pr_userActive--;
        if (_pr_userActive == _pr_primordialExitCount) {
            PR_NotifyCondVar(_pr_primordialExitCVar);
        }
    }
    PR_Unlock(_pr_activeLock);
}

/*
** Detach thread structure
*/
static void
_PR_DestroyThread(PRThread *thread)
{
    _PR_MD_FREE_LOCK(&thread->threadLock);
    PR_DELETE(thread);
}

void
_PR_NativeDestroyThread(PRThread *thread)
{
    if(thread->term) {
        PR_DestroyCondVar(thread->term);
        thread->term = 0;
    }
    if (NULL != thread->privateData) {
        PR_ASSERT(0 != thread->tpdLength);
        PR_DELETE(thread->privateData);
        thread->tpdLength = 0;
    }
    PR_DELETE(thread->stack);
    _PR_DestroyThread(thread);
}

void
_PR_UserDestroyThread(PRThread *thread)
{
    if(thread->term) {
        PR_DestroyCondVar(thread->term);
        thread->term = 0;
    }
    if (NULL != thread->privateData) {
        PR_ASSERT(0 != thread->tpdLength);
        PR_DELETE(thread->privateData);
        thread->tpdLength = 0;
    }
    _PR_MD_FREE_LOCK(&thread->threadLock);
    if (thread->threadAllocatedOnStack == 1) {
        _PR_MD_CLEAN_THREAD(thread);
        /*
         * Because the no_sched field is set, this thread/stack will
         * not be re-used until the flag is cleared by the thread
         * we will context switch to.
         */
        _PR_FreeStack(thread->stack);
    } else {
#ifdef WINNT
        _PR_MD_CLEAN_THREAD(thread);
#else
        /*
         * This assertion does not apply to NT. On NT, every fiber
         * has its threadAllocatedOnStack equal to 0. Elsewhere,
         * only the primordial thread has its threadAllocatedOnStack
         * equal to 0.
         */
        PR_ASSERT(thread->flags & _PR_PRIMORDIAL);
#endif
    }
}


/*
** Run a thread's start function. When the start function returns the
** thread is done executing and no longer needs the CPU. If there are no
** more user threads running then we can exit the program.
*/
void _PR_NativeRunThread(void *arg)
{
    PRThread *thread = (PRThread *)arg;

    _PR_MD_SET_CURRENT_THREAD(thread);

    _PR_MD_SET_CURRENT_CPU(NULL);

    /* Set up the thread stack information */
    _PR_InitializeNativeStack(thread->stack);

    /* Set up the thread md information */
    if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
        /*
         * thread failed to initialize itself, possibly due to
         * failure to allocate per-thread resources
         */
        return;
    }

    while(1) {
        thread->state = _PR_RUNNING;

        /*
         * Add to list of active threads
         */
        PR_Lock(_pr_activeLock);
        PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_GLOBAL_THREADQ());
        _pr_global_threads++;
        PR_Unlock(_pr_activeLock);

        (*thread->startFunc)(thread->arg);

        /*
         * The following two assertions are meant for NT asynch io.
         *
         * The thread should have no asynch io in progress when it
         * exits, otherwise the overlapped buffer, which is part of
         * the thread structure, would become invalid.
         */
        PR_ASSERT(thread->io_pending == PR_FALSE);
        /*
         * This assertion enforces the programming guideline that
         * if an io function times out or is interrupted, the thread
         * should close the fd to force the asynch io to abort
         * before it exits. Right now, closing the fd is the only
         * way to clear the io_suspended flag.
         */
        PR_ASSERT(thread->io_suspended == PR_FALSE);

        /*
         * remove thread from list of active threads
         */
        PR_Lock(_pr_activeLock);
        PR_REMOVE_LINK(&thread->active);
        _pr_global_threads--;
        PR_Unlock(_pr_activeLock);

        PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("thread exiting"));

        /* All done, time to go away */
        _PR_CleanupThread(thread);

        _PR_NotifyJoinWaiters(thread);

        _PR_DecrActiveThreadCount(thread);

        thread->state = _PR_DEAD_STATE;

        if (!_pr_recycleThreads || (_PR_RecycleThread(thread) ==
                                    PR_FAILURE)) {
            /*
             * thread not recycled
             * platform-specific thread exit processing
             * - for stuff like releasing native-thread resources, etc.
             */
            _PR_MD_EXIT_THREAD(thread);
            /*
             * Free memory allocated for the thread
             */
            _PR_NativeDestroyThread(thread);
            /*
             * thread gone, cannot de-reference thread now
             */
            return;
        }

        /* Now wait for someone to activate us again... */
        _PR_MD_WAIT(thread, PR_INTERVAL_NO_TIMEOUT);
    }
}

static void _PR_UserRunThread(void)
{
    PRThread *thread = _PR_MD_CURRENT_THREAD();
    PRIntn is;

    if (_MD_LAST_THREAD())
        _MD_LAST_THREAD()->no_sched = 0;

#ifdef HAVE_CUSTOM_USER_THREADS
    if (thread->stack == NULL) {
        thread->stack = PR_NEWZAP(PRThreadStack);
        _PR_InitializeNativeStack(thread->stack);
    }
#endif /* HAVE_CUSTOM_USER_THREADS */

    while(1) {
        /* Run thread main */
        if ( !_PR_IS_NATIVE_THREAD(thread)) _PR_MD_SET_INTSOFF(0);

        /*
         * Add to list of active threads
         */
        if (!(thread->flags & _PR_IDLE_THREAD)) {
            PR_Lock(_pr_activeLock);
            PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_LOCAL_THREADQ());
            _pr_local_threads++;
            PR_Unlock(_pr_activeLock);
        }

        (*thread->startFunc)(thread->arg);

        /*
         * The following two assertions are meant for NT asynch io.
         *
         * The thread should have no asynch io in progress when it
         * exits, otherwise the overlapped buffer, which is part of
         * the thread structure, would become invalid.
         */
        PR_ASSERT(thread->io_pending == PR_FALSE);
        /*
         * This assertion enforces the programming guideline that
         * if an io function times out or is interrupted, the thread
         * should close the fd to force the asynch io to abort
         * before it exits. Right now, closing the fd is the only
         * way to clear the io_suspended flag.
         */
        PR_ASSERT(thread->io_suspended == PR_FALSE);

        PR_Lock(_pr_activeLock);
        /*
         * remove thread from list of active threads
         */
        if (!(thread->flags & _PR_IDLE_THREAD)) {
            PR_REMOVE_LINK(&thread->active);
            _pr_local_threads--;
        }
        PR_Unlock(_pr_activeLock);
        PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("thread exiting"));

        /* All done, time to go away */
        _PR_CleanupThread(thread);

        _PR_INTSOFF(is);

        _PR_NotifyJoinWaiters(thread);

        _PR_DecrActiveThreadCount(thread);

        thread->state = _PR_DEAD_STATE;

        if (!_pr_recycleThreads || (_PR_RecycleThread(thread) ==
                                    PR_FAILURE)) {
            /*
            ** Destroy the thread resources
            */
            _PR_UserDestroyThread(thread);
        }

        /*
        ** Find another user thread to run. This cpu has finished the
        ** previous thread's main and is now ready to run another thread.
        */
        {
            PRInt32 is;
            _PR_INTSOFF(is);
            _PR_MD_SWITCH_CONTEXT(thread);
        }

        /* Will land here when we get scheduled again if we are recycling... */
    }
}

void _PR_SetThreadPriority(PRThread *thread, PRThreadPriority newPri)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRIntn is;

    if ( _PR_IS_NATIVE_THREAD(thread) ) {
        _PR_MD_SET_PRIORITY(&(thread->md), newPri);
        return;
    }

    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    _PR_THREAD_LOCK(thread);
    if (newPri != thread->priority) {
        _PRCPU *cpu = thread->cpu;

        switch (thread->state) {
          case _PR_RUNNING:
            /* Change my priority */

            _PR_RUNQ_LOCK(cpu);
            thread->priority = newPri;
            if (_PR_RUNQREADYMASK(cpu) >> (newPri + 1)) {
                if (!_PR_IS_NATIVE_THREAD(me))
                    _PR_SET_RESCHED_FLAG();
            }
            _PR_RUNQ_UNLOCK(cpu);
            break;

          case _PR_RUNNABLE:

            _PR_RUNQ_LOCK(cpu);
            /* Move to different runQ */
            _PR_DEL_RUNQ(thread);
            thread->priority = newPri;
            PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
            _PR_ADD_RUNQ(thread, cpu, newPri);
            _PR_RUNQ_UNLOCK(cpu);

            if (newPri > me->priority) {
                if (!_PR_IS_NATIVE_THREAD(me))
                    _PR_SET_RESCHED_FLAG();
            }

            break;

          case _PR_LOCK_WAIT:
          case _PR_COND_WAIT:
          case _PR_IO_WAIT:
          case _PR_SUSPENDED:

            thread->priority = newPri;
            break;
        }
    }
    _PR_THREAD_UNLOCK(thread);
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSON(is);
}

/*
** Suspend the named thread
*/
static void _PR_Suspend(PRThread *thread)
{
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    PR_ASSERT(thread != me);
    PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread) || (!thread->cpu));

    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    _PR_THREAD_LOCK(thread);
    switch (thread->state) {
      case _PR_RUNNABLE:
        if (!_PR_IS_NATIVE_THREAD(thread)) {
            _PR_RUNQ_LOCK(thread->cpu);
            _PR_DEL_RUNQ(thread);
            _PR_RUNQ_UNLOCK(thread->cpu);

            _PR_MISCQ_LOCK(thread->cpu);
            _PR_ADD_SUSPENDQ(thread, thread->cpu);
            _PR_MISCQ_UNLOCK(thread->cpu);
        } else {
            /*
             * Only LOCAL threads are suspended by _PR_Suspend
             */
            PR_ASSERT(0);
        }
        thread->state = _PR_SUSPENDED;
        break;

      case _PR_RUNNING:
        /*
         * The thread being suspended should be a LOCAL thread with
         * _pr_numCPUs == 1. Hence, the thread cannot be in RUNNING state
         */
        PR_ASSERT(0);
        break;

      case _PR_LOCK_WAIT:
      case _PR_IO_WAIT:
      case _PR_COND_WAIT:
        if (_PR_IS_NATIVE_THREAD(thread)) {
            _PR_MD_SUSPEND_THREAD(thread);
        }
        thread->flags |= _PR_SUSPENDING;
        break;

      default:
        PR_Abort();
    }
    _PR_THREAD_UNLOCK(thread);
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSON(is);
}

static void _PR_Resume(PRThread *thread)
{
    PRThreadPriority pri;
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    _PR_THREAD_LOCK(thread);
    switch (thread->state) {
      case _PR_SUSPENDED:
        thread->state = _PR_RUNNABLE;
        thread->flags &= ~_PR_SUSPENDING;
        if (!_PR_IS_NATIVE_THREAD(thread)) {
            _PR_MISCQ_LOCK(thread->cpu);
            _PR_DEL_SUSPENDQ(thread);
            _PR_MISCQ_UNLOCK(thread->cpu);

            pri = thread->priority;

            _PR_RUNQ_LOCK(thread->cpu);
            _PR_ADD_RUNQ(thread, thread->cpu, pri);
            _PR_RUNQ_UNLOCK(thread->cpu);

            if (pri > _PR_MD_CURRENT_THREAD()->priority) {
                if (!_PR_IS_NATIVE_THREAD(me))
                    _PR_SET_RESCHED_FLAG();
            }
        } else {
            PR_ASSERT(0);
        }
        break;

      case _PR_IO_WAIT:
      case _PR_COND_WAIT:
        thread->flags &= ~_PR_SUSPENDING;
        /* PR_ASSERT(thread->wait.monitor->stickyCount == 0); */
        break;

      case _PR_LOCK_WAIT:
        {
            PRLock *wLock = thread->wait.lock;

            thread->flags &= ~_PR_SUSPENDING;

            _PR_LOCK_LOCK(wLock);
            if (thread->wait.lock->owner == 0) {
                _PR_UnblockLockWaiter(thread->wait.lock);
            }
            _PR_LOCK_UNLOCK(wLock);
            break;
        }
      case _PR_RUNNABLE:
        break;
      case _PR_RUNNING:
        /*
         * The thread being suspended should be a LOCAL thread with
         * _pr_numCPUs == 1. Hence, the thread cannot be in RUNNING state
         */
        PR_ASSERT(0);
        break;

      default:
        /*
         * thread should have been in one of the above-listed blocked states
         * (_PR_JOIN_WAIT, _PR_IO_WAIT, _PR_UNBORN, _PR_DEAD_STATE)
         */
        PR_Abort();
    }
    _PR_THREAD_UNLOCK(thread);
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSON(is);
}

#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
static PRThread *get_thread(_PRCPU *cpu, PRBool *wakeup_cpus)
{
    PRThread *thread;
    PRIntn pri;
    PRUint32 r;
    PRCList *qp;
    PRIntn priMin, priMax;

    _PR_RUNQ_LOCK(cpu);
    r = _PR_RUNQREADYMASK(cpu);
    if (r==0) {
        priMin = priMax = PR_PRIORITY_FIRST;
    } else if (r == (1<<PR_PRIORITY_NORMAL) ) {
        priMin = priMax = PR_PRIORITY_NORMAL;
    } else {
        priMin = PR_PRIORITY_FIRST;
        priMax = PR_PRIORITY_LAST;
    }
    thread = NULL;
    for (pri = priMax; pri >= priMin ; pri-- ) {
        if (r & (1 << pri)) {
            for (qp = _PR_RUNQ(cpu)[pri].next;
                 qp != &_PR_RUNQ(cpu)[pri];
                 qp = qp->next) {
                thread = _PR_THREAD_PTR(qp);
                /*
                 * skip non-schedulable threads
                 */
                PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
                if (thread->no_sched) {
                    thread = NULL;
                    /*
                     * Need to wake up the cpus to avoid missing a
                     * runnable thread. Waking up all CPUs needs to
                     * happen only once.
                     */
                    *wakeup_cpus = PR_TRUE;
                    continue;
                } else if (thread->flags & _PR_BOUND_THREAD) {
                    /*
                     * Thread bound to cpu 0
                     */
                    thread = NULL;
#ifdef IRIX
                    _PR_MD_WAKEUP_PRIMORDIAL_CPU();
#endif
                    continue;
                } else if (thread->io_pending == PR_TRUE) {
                    /*
                     * A thread that is blocked for I/O needs to run on the
                     * same cpu on which it was blocked. This is because the
                     * cpu's ioq is accessed without lock protection and
                     * scheduling the thread on a different cpu would
                     * preclude this optimization.
                     */
                    thread = NULL;
                    continue;
                } else {
                    /* Pull thread off of its run queue */
                    _PR_DEL_RUNQ(thread);
                    _PR_RUNQ_UNLOCK(cpu);
                    return(thread);
                }
            }
        }
        thread = NULL;
    }
    _PR_RUNQ_UNLOCK(cpu);
    return(thread);
}
#endif /* !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX) */

/*
** Schedule this native thread by finding the highest priority nspr
** thread that is ready to run.
**
** Note - everyone really needs to call _PR_MD_SWITCH_CONTEXT (which
** calls _PR_Schedule) rather than calling _PR_Schedule directly.
** Otherwise, if there is initialization required for switching from
** SWITCH_CONTEXT, it will not get done!
*/
void _PR_Schedule(void)
{
    PRThread *thread, *me = _PR_MD_CURRENT_THREAD();
    _PRCPU *cpu = _PR_MD_CURRENT_CPU();
    PRIntn pri;
    PRUint32 r;
    PRCList *qp;
    PRIntn priMin, priMax;
#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
    PRBool wakeup_cpus;
#endif

    /* Interrupts must be disabled */
    PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0);

    /* Since we are rescheduling, we no longer need the resched flag */
    _PR_CLEAR_RESCHED_FLAG();

    /*
    ** Find highest priority thread to run. Bigger priority numbers are
    ** higher priority threads
    */
    _PR_RUNQ_LOCK(cpu);
    /*
     * if we are in SuspendAll mode, can schedule only the thread
     * that called PR_SuspendAll
     *
     * The thread may be ready to run now, after completing an I/O
     * operation, for example
     */
    if ((thread = suspendAllThread) != 0) {
        if ((!(thread->no_sched)) && (thread->state == _PR_RUNNABLE)) {
            /* Pull thread off of its run queue */
            _PR_DEL_RUNQ(thread);
            _PR_RUNQ_UNLOCK(cpu);
            goto found_thread;
        } else {
            thread = NULL;
            _PR_RUNQ_UNLOCK(cpu);
            goto idle_thread;
        }
    }
    r = _PR_RUNQREADYMASK(cpu);
    if (r==0) {
        priMin = priMax = PR_PRIORITY_FIRST;
    } else if (r == (1<<PR_PRIORITY_NORMAL) ) {
        priMin = priMax = PR_PRIORITY_NORMAL;
    } else {
        priMin = PR_PRIORITY_FIRST;
        priMax = PR_PRIORITY_LAST;
    }
    thread = NULL;
    for (pri = priMax; pri >= priMin ; pri-- ) {
        if (r & (1 << pri)) {
            for (qp = _PR_RUNQ(cpu)[pri].next;
                 qp != &_PR_RUNQ(cpu)[pri];
                 qp = qp->next) {
                thread = _PR_THREAD_PTR(qp);
                /*
                 * skip non-schedulable threads
                 */
                PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
                if ((thread->no_sched) && (me != thread)){
                    thread = NULL;
                    continue;
                } else {
                    /* Pull thread off of its run queue */
                    _PR_DEL_RUNQ(thread);
                    _PR_RUNQ_UNLOCK(cpu);
                    goto found_thread;
                }
            }
        }
        thread = NULL;
    }
    _PR_RUNQ_UNLOCK(cpu);

#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)

    wakeup_cpus = PR_FALSE;
    _PR_CPU_LIST_LOCK();
    for (qp = _PR_CPUQ().next; qp != &_PR_CPUQ(); qp = qp->next) {
        if (cpu != _PR_CPU_PTR(qp)) {
            if ((thread = get_thread(_PR_CPU_PTR(qp), &wakeup_cpus))
                != NULL) {
                thread->cpu = cpu;
                _PR_CPU_LIST_UNLOCK();
                if (wakeup_cpus == PR_TRUE)
                    _PR_MD_WAKEUP_CPUS();
                goto found_thread;
            }
        }
    }
    _PR_CPU_LIST_UNLOCK();
    if (wakeup_cpus == PR_TRUE)
        _PR_MD_WAKEUP_CPUS();

#endif /* !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX) */

  idle_thread:
    /*
    ** There are no threads to run. Switch to the idle thread
    */
    PR_LOG(_pr_sched_lm, PR_LOG_MAX, ("pausing"));
    thread = _PR_MD_CURRENT_CPU()->idle_thread;

  found_thread:
    PR_ASSERT((me == thread) || ((thread->state == _PR_RUNNABLE) &&
                                 (!(thread->no_sched))));

    /* Resume the thread */
    PR_LOG(_pr_sched_lm, PR_LOG_MAX,
           ("switching to %d[%p]", thread->id, thread));
    PR_ASSERT(thread->state != _PR_RUNNING);
    thread->state = _PR_RUNNING;

    /* If we are on the runq, it just means that we went to sleep on some
     * resource, and by the time we got here another real native thread had
     * already given us the resource and put us back on the runqueue
     */
    PR_ASSERT(thread->cpu == _PR_MD_CURRENT_CPU());
    if (thread != me)
        _PR_MD_RESTORE_CONTEXT(thread);
#if 0
    /* XXXMB; with setjmp/longjmp it is impossible to land here, but
     * it is not with fibers... Is this a bad thing? I believe it is
     * still safe.
     */
    PR_NOT_REACHED("impossible return from schedule");
#endif
}
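
#if 0
/* Usage sketch (editor's note, not code from this file): the canonical
 * way to yield into the scheduler, as seen in _PR_UserRunThread() above.
 * Interrupts are disabled first, then control goes through
 * _PR_MD_SWITCH_CONTEXT() -- never _PR_Schedule() directly, per the
 * comment preceding _PR_Schedule(). */
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRIntn is;
    _PR_INTSOFF(is);
    _PR_MD_SWITCH_CONTEXT(me);
}
#endif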

/*
** Attaches a thread.
** Does not set the _PR_MD_CURRENT_THREAD.
** Does not specify the scope of the thread.
*/
static PRThread *
_PR_AttachThread(PRThreadType type, PRThreadPriority priority,
                 PRThreadStack *stack)
{
    PRThread *thread;
    char *mem;

    if (priority > PR_PRIORITY_LAST) {
        priority = PR_PRIORITY_LAST;
    } else if (priority < PR_PRIORITY_FIRST) {
        priority = PR_PRIORITY_FIRST;
    }

    mem = (char*) PR_CALLOC(sizeof(PRThread));
    if (mem) {
        thread = (PRThread*) mem;
        thread->priority = priority;
        thread->stack = stack;
        thread->state = _PR_RUNNING;
        PR_INIT_CLIST(&thread->lockList);
        if (_PR_MD_NEW_LOCK(&thread->threadLock) == PR_FAILURE) {
            PR_DELETE(thread);
            return 0;
        }

        return thread;
    }
    return 0;
}


PR_IMPLEMENT(PRThread*)
_PR_NativeCreateThread(PRThreadType type,
                       void (*start)(void *arg),
                       void *arg,
                       PRThreadPriority priority,
                       PRThreadScope scope,
                       PRThreadState state,
                       PRUint32 stackSize,
                       PRUint32 flags)
{
    PRThread *thread;

    thread = _PR_AttachThread(type, priority, NULL);

    if (thread) {
        PR_Lock(_pr_activeLock);
        thread->flags = (flags | _PR_GLOBAL_SCOPE);
        thread->id = ++_pr_utid;
        if (type == PR_SYSTEM_THREAD) {
            thread->flags |= _PR_SYSTEM;
            _pr_systemActive++;
        } else {
            _pr_userActive++;
        }
        PR_Unlock(_pr_activeLock);

        thread->stack = PR_NEWZAP(PRThreadStack);
        if (!thread->stack) {
            PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
            goto done;
        }
        thread->stack->stackSize = stackSize?stackSize:_MD_DEFAULT_STACK_SIZE;
        thread->stack->thr = thread;
        thread->startFunc = start;
        thread->arg = arg;

        /*
         * Set thread flags related to scope and joinable state. If joinable
         * thread, allocate a "termination" condition variable.
         */
        if (state == PR_JOINABLE_THREAD) {
            thread->term = PR_NewCondVar(_pr_terminationCVLock);
            if (thread->term == NULL) {
                PR_DELETE(thread->stack);
                goto done;
            }
        }

        thread->state = _PR_RUNNING;
        if (_PR_MD_CREATE_THREAD(thread, _PR_NativeRunThread, priority,
                                 scope, state, stackSize) == PR_SUCCESS) {
            return thread;
        }
        if (thread->term) {
            PR_DestroyCondVar(thread->term);
            thread->term = NULL;
        }
        PR_DELETE(thread->stack);
    }

done:
    if (thread) {
        _PR_DecrActiveThreadCount(thread);
        _PR_DestroyThread(thread);
    }
    return NULL;
}

/************************************************************************/

PR_IMPLEMENT(PRThread*) _PR_CreateThread(PRThreadType type,
                                         void (*start)(void *arg),
                                         void *arg,
                                         PRThreadPriority priority,
                                         PRThreadScope scope,
                                         PRThreadState state,
                                         PRUint32 stackSize,
                                         PRUint32 flags)
{
    PRThread *me;
    PRThread *thread = NULL;
    PRThreadStack *stack;
    char *top;
    PRIntn is;
    PRIntn native = 0;
    PRIntn useRecycled = 0;
    PRBool status;

    /*
     * First, pin down the priority. Not all compilers catch passing an
     * out-of-range enum here. If we let bad values through, the priority
     * queues won't work.
     */
    if (priority > PR_PRIORITY_LAST) {
        priority = PR_PRIORITY_LAST;
    } else if (priority < PR_PRIORITY_FIRST) {
        priority = PR_PRIORITY_FIRST;
    }

    if (!_pr_initialized) _PR_ImplicitInitialization();

    if (! (flags & _PR_IDLE_THREAD))
        me = _PR_MD_CURRENT_THREAD();

#if defined(_PR_GLOBAL_THREADS_ONLY)
    /*
     * can create global threads only
     */
    if (scope == PR_LOCAL_THREAD)
        scope = PR_GLOBAL_THREAD;
#endif

    if (_native_threads_only)
        scope = PR_GLOBAL_THREAD;

    native = (((scope == PR_GLOBAL_THREAD)|| (scope == PR_GLOBAL_BOUND_THREAD))
              && _PR_IS_NATIVE_THREAD_SUPPORTED());

    _PR_ADJUST_STACKSIZE(stackSize);

    if (native) {
        /*
         * clear the IDLE_THREAD flag which applies to LOCAL
         * threads only
         */
        flags &= ~_PR_IDLE_THREAD;
        flags |= _PR_GLOBAL_SCOPE;
        if (_PR_NUM_DEADNATIVE > 0) {
            _PR_DEADQ_LOCK;

            if (_PR_NUM_DEADNATIVE == 0) { /* Thread safe check */
                _PR_DEADQ_UNLOCK;
            } else {
                thread = _PR_THREAD_PTR(_PR_DEADNATIVEQ.next);
                PR_REMOVE_LINK(&thread->links);
                _PR_DEC_DEADNATIVE;
                _PR_DEADQ_UNLOCK;

                _PR_InitializeRecycledThread(thread);
                thread->startFunc = start;
                thread->arg = arg;
                thread->flags = (flags | _PR_GLOBAL_SCOPE);
                if (type == PR_SYSTEM_THREAD)
                {
                    thread->flags |= _PR_SYSTEM;
                    PR_ATOMIC_INCREMENT(&_pr_systemActive);
                }
                else PR_ATOMIC_INCREMENT(&_pr_userActive);

                if (state == PR_JOINABLE_THREAD) {
                    if (!thread->term)
                        thread->term = PR_NewCondVar(_pr_terminationCVLock);
                }
                else {
                    if(thread->term) {
                        PR_DestroyCondVar(thread->term);
                        thread->term = 0;
                    }
                }

                thread->priority = priority;
                _PR_MD_SET_PRIORITY(&(thread->md), priority);
                /* XXX what about stackSize? */
                thread->state = _PR_RUNNING;
                _PR_MD_WAKEUP_WAITER(thread);
                return thread;
            }
        }
        thread = _PR_NativeCreateThread(type, start, arg, priority,
                                        scope, state, stackSize, flags);
    } else {
        if (_PR_NUM_DEADUSER > 0) {
            _PR_DEADQ_LOCK;

            if (_PR_NUM_DEADUSER == 0) { /* thread safe check */
                _PR_DEADQ_UNLOCK;
            } else {
                PRCList *ptr;

                /* Go down list checking for a recycled thread with a
                 * large enough stack. XXXMB - this has a bad degenerate case.
                 */
                ptr = _PR_DEADUSERQ.next;
                while( ptr != &_PR_DEADUSERQ ) {
                    thread = _PR_THREAD_PTR(ptr);
                    if ((thread->stack->stackSize >= stackSize) &&
                        (!thread->no_sched)) {
                        PR_REMOVE_LINK(&thread->links);
                        _PR_DEC_DEADUSER;
                        break;
                    } else {
                        ptr = ptr->next;
                        thread = NULL;
                    }
                }

                _PR_DEADQ_UNLOCK;

                if (thread) {
                    _PR_InitializeRecycledThread(thread);
                    thread->startFunc = start;
                    thread->arg = arg;
                    thread->priority = priority;
                    if (state == PR_JOINABLE_THREAD) {
                        if (!thread->term)
                            thread->term = PR_NewCondVar(_pr_terminationCVLock);
                    } else {
                        if(thread->term) {
                            PR_DestroyCondVar(thread->term);
                            thread->term = 0;
                        }
                    }
                    useRecycled++;
                }
            }
        }
        if (thread == NULL) {
#ifndef HAVE_CUSTOM_USER_THREADS
            stack = _PR_NewStack(stackSize);
            if (!stack) {
                PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
                return NULL;
            }

            /* Allocate thread object and per-thread data off the top
             * of the stack */
            top = stack->stackTop;
#ifdef HAVE_STACK_GROWING_UP
            thread = (PRThread*) top;
            top = top + sizeof(PRThread);
            /*
             * Make stack 64-byte aligned
             */
            if ((PRUptrdiff)top & 0x3f) {
                top = (char*)(((PRUptrdiff)top + 0x40) & ~0x3f);
            }
#else
            top = top - sizeof(PRThread);
            thread = (PRThread*) top;
            /*
             * Make stack 64-byte aligned
             */
            if ((PRUptrdiff)top & 0x3f) {
                top = (char*)((PRUptrdiff)top & ~0x3f);
            }
#endif
            stack->thr = thread;
            memset(thread, 0, sizeof(PRThread));
            thread->threadAllocatedOnStack = 1;
#else
            thread = _PR_MD_CREATE_USER_THREAD(stackSize, start, arg);
            if (!thread) {
                PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
                return NULL;
            }
            thread->threadAllocatedOnStack = 0;
            stack = NULL;
            top = NULL;
#endif

            /* Initialize thread */
            thread->tpdLength = 0;
            thread->privateData = NULL;
            thread->stack = stack;
            thread->priority = priority;
            thread->startFunc = start;
            thread->arg = arg;
            PR_INIT_CLIST(&thread->lockList);

            if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
                if (thread->threadAllocatedOnStack == 1)
                    _PR_FreeStack(thread->stack);
                else {
                    PR_DELETE(thread);
                }
                PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
                return NULL;
            }

            if (_PR_MD_NEW_LOCK(&thread->threadLock) == PR_FAILURE) {
                if (thread->threadAllocatedOnStack == 1)
                    _PR_FreeStack(thread->stack);
                else {
                    PR_DELETE(thread->privateData);
                    PR_DELETE(thread);
                }
                PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
                return NULL;
            }

            _PR_MD_INIT_CONTEXT(thread, top, _PR_UserRunThread, &status);

            if (status == PR_FALSE) {
                _PR_MD_FREE_LOCK(&thread->threadLock);
                if (thread->threadAllocatedOnStack == 1)
                    _PR_FreeStack(thread->stack);
                else {
                    PR_DELETE(thread->privateData);
                    PR_DELETE(thread);
                }
                return NULL;
            }

            /*
             * Set thread flags related to scope and joinable state. If
             * joinable thread, allocate a "termination" condition variable.
             */
            if (state == PR_JOINABLE_THREAD) {
                thread->term = PR_NewCondVar(_pr_terminationCVLock);
                if (thread->term == NULL) {
                    _PR_MD_FREE_LOCK(&thread->threadLock);
                    if (thread->threadAllocatedOnStack == 1)
                        _PR_FreeStack(thread->stack);
                    else {
                        PR_DELETE(thread->privateData);
                        PR_DELETE(thread);
                    }
                    return NULL;
                }
            }

        }

        /* Update thread type counter */
        PR_Lock(_pr_activeLock);
        thread->flags = flags;
        thread->id = ++_pr_utid;
        if (type == PR_SYSTEM_THREAD) {
            thread->flags |= _PR_SYSTEM;
            _pr_systemActive++;
        } else {
            _pr_userActive++;
        }

        /* Make thread runnable */
        thread->state = _PR_RUNNABLE;
        /*
         * Add to list of active threads
         */
        PR_Unlock(_pr_activeLock);

        if ((! (thread->flags & _PR_IDLE_THREAD)) && _PR_IS_NATIVE_THREAD(me) )
            thread->cpu = _PR_GetPrimordialCPU();
        else
            thread->cpu = _PR_MD_CURRENT_CPU();

        PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));

        if ((! (thread->flags & _PR_IDLE_THREAD)) && !_PR_IS_NATIVE_THREAD(me)) {
            _PR_INTSOFF(is);
            _PR_RUNQ_LOCK(thread->cpu);
            _PR_ADD_RUNQ(thread, thread->cpu, priority);
            _PR_RUNQ_UNLOCK(thread->cpu);
        }

        if (thread->flags & _PR_IDLE_THREAD) {
            /*
            ** If the creating thread is a kernel thread, we need to
            ** awaken the user thread idle thread somehow; potentially
            ** it could be sleeping in its idle loop, and we need to poke
            ** it. To do so, wake the idle thread...
            */
            _PR_MD_WAKEUP_WAITER(NULL);
        } else if (_PR_IS_NATIVE_THREAD(me)) {
            _PR_MD_WAKEUP_WAITER(thread);
        }
        if ((! (thread->flags & _PR_IDLE_THREAD)) && !_PR_IS_NATIVE_THREAD(me) )
            _PR_INTSON(is);
    }

    return thread;
}

PR_IMPLEMENT(PRThread*) PR_CreateThread(PRThreadType type,
                                        void (*start)(void *arg),
                                        void *arg,
                                        PRThreadPriority priority,
                                        PRThreadScope scope,
                                        PRThreadState state,
                                        PRUint32 stackSize)
{
    return _PR_CreateThread(type, start, arg, priority, scope, state,
                            stackSize, 0);
}

/*
** Associate a thread object with an existing native thread.
** "type" is the type of thread object to attach
** "priority" is the priority to assign to the thread
** "stack" defines the shape of the thread's stack
**
** This can return NULL if some kind of error occurs, or if memory is
** tight.
**
** This call is not normally needed unless you create your own native
** thread. PR_Init does this automatically for the primordial thread.
*/
PRThread* _PRI_AttachThread(PRThreadType type,
                            PRThreadPriority priority, PRThreadStack *stack, PRUint32 flags)
{
    PRThread *thread;

    if ((thread = _PR_MD_GET_ATTACHED_THREAD()) != NULL) {
        return thread;
    }
    _PR_MD_SET_CURRENT_THREAD(NULL);

    /* Clear out any state if this thread was attached before */
    _PR_MD_SET_CURRENT_CPU(NULL);

    thread = _PR_AttachThread(type, priority, stack);
    if (thread) {
        PRIntn is;

        _PR_MD_SET_CURRENT_THREAD(thread);

        thread->flags = flags | _PR_GLOBAL_SCOPE | _PR_ATTACHED;

        if (!stack) {
            thread->stack = PR_NEWZAP(PRThreadStack);
            if (!thread->stack) {
                _PR_DestroyThread(thread);
                return NULL;
            }
            thread->stack->stackSize = _MD_DEFAULT_STACK_SIZE;
        }
        PR_INIT_CLIST(&thread->links);

        if (_PR_MD_INIT_ATTACHED_THREAD(thread) == PR_FAILURE) {
            PR_DELETE(thread->stack);
            _PR_DestroyThread(thread);
            return NULL;
        }

        _PR_MD_SET_CURRENT_CPU(NULL);

        if (_PR_MD_CURRENT_CPU()) {
            _PR_INTSOFF(is);
            PR_Lock(_pr_activeLock);
        }
        if (type == PR_SYSTEM_THREAD) {
            thread->flags |= _PR_SYSTEM;
            _pr_systemActive++;
        } else {
            _pr_userActive++;
        }
        if (_PR_MD_CURRENT_CPU()) {
            PR_Unlock(_pr_activeLock);
            _PR_INTSON(is);
        }
    }
    return thread;
}

PR_IMPLEMENT(PRThread*) PR_AttachThread(PRThreadType type,
                                        PRThreadPriority priority, PRThreadStack *stack)
{
    return PR_GetCurrentThread();
}

PR_IMPLEMENT(void) PR_DetachThread(void)
{
    /*
     * On IRIX, Solaris, and Windows, foreign threads are detached when
     * they terminate.
     */
#if !defined(IRIX) && !defined(WIN32) \
    && !(defined(SOLARIS) && defined(_PR_GLOBAL_THREADS_ONLY))
    PRThread *me;
    if (_pr_initialized) {
        me = _PR_MD_GET_ATTACHED_THREAD();
        if ((me != NULL) && (me->flags & _PR_ATTACHED))
            _PRI_DetachThread();
    }
#endif
}

void _PRI_DetachThread(void)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();

    if (me->flags & _PR_PRIMORDIAL) {
        /*
         * ignore, if primordial thread
         */
        return;
    }
    PR_ASSERT(me->flags & _PR_ATTACHED);
    PR_ASSERT(_PR_IS_NATIVE_THREAD(me));
    _PR_CleanupThread(me);
    PR_DELETE(me->privateData);

    _PR_DecrActiveThreadCount(me);

    _PR_MD_CLEAN_THREAD(me);
    _PR_MD_SET_CURRENT_THREAD(NULL);
    if (!me->threadAllocatedOnStack)
        PR_DELETE(me->stack);
    _PR_MD_FREE_LOCK(&me->threadLock);
    PR_DELETE(me);
}

/*
** Wait for thread termination:
** "thread" is the target thread
**
** This can return PR_FAILURE if no joinable thread could be found
** corresponding to the specified target thread.
**
** The calling thread is suspended until the target thread completes.
** Several threads cannot wait for the same thread to complete; one
** thread will complete successfully and the others will terminate with
** an error of PR_FAILURE. The calling thread will not be blocked if the
** target thread has already terminated.
*/
PR_IMPLEMENT(PRStatus) PR_JoinThread(PRThread *thread)
{
    PRIntn is;
    PRCondVar *term;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    term = thread->term;
    /* can't join a non-joinable thread */
    if (term == NULL) {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        goto ErrorExit;
    }

    /* multiple threads can't wait on the same joinable thread */
    if (term->condQ.next != &term->condQ) {
        goto ErrorExit;
    }
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSON(is);

    /* wait for the target thread's termination cv invariant */
    PR_Lock (_pr_terminationCVLock);
    while (thread->state != _PR_JOIN_WAIT) {
        (void) PR_WaitCondVar(term, PR_INTERVAL_NO_TIMEOUT);
    }
    (void) PR_Unlock (_pr_terminationCVLock);

    /*
     * Remove target thread from global waiting to join Q; make it runnable
     * again and put it back on its run Q. When it gets scheduled later in
     * _PR_RunThread code, it will clean up its stack.
     */
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    thread->state = _PR_RUNNABLE;
    if ( !_PR_IS_NATIVE_THREAD(thread) ) {
        _PR_THREAD_LOCK(thread);

        _PR_MISCQ_LOCK(thread->cpu);
        _PR_DEL_JOINQ(thread);
        _PR_MISCQ_UNLOCK(thread->cpu);

        _PR_AddThreadToRunQ(me, thread);
        _PR_THREAD_UNLOCK(thread);
    }
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSON(is);

    _PR_MD_WAKEUP_WAITER(thread);

    return PR_SUCCESS;

ErrorExit:
    if ( !_PR_IS_NATIVE_THREAD(me)) _PR_INTSON(is);
    return PR_FAILURE;
}
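
#if 0
/* Usage sketch (editor's note, not code from this file): creating a
 * joinable thread and waiting for it. The thread must be created with
 * PR_JOINABLE_THREAD; joining an unjoinable thread fails with
 * PR_INVALID_ARGUMENT_ERROR, as shown above. The worker function and
 * names here are hypothetical. */
static void worker(void *arg)
{
    /* ... do some work ... */
}

static void join_example(void)
{
    PRThread *t = PR_CreateThread(PR_USER_THREAD, worker, NULL,
                                  PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
                                  PR_JOINABLE_THREAD, 0);
    if (t != NULL) {
        PR_JoinThread(t);  /* blocks until worker() has returned */
    }
}
#endif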

PR_IMPLEMENT(void) PR_SetThreadPriority(PRThread *thread,
                                        PRThreadPriority newPri)
{

    /*
     * First, pin down the priority. Not all compilers catch passing an
     * out-of-range enum here. If we let bad values through, the priority
     * queues won't work.
     */
    if ((PRIntn)newPri > (PRIntn)PR_PRIORITY_LAST) {
        newPri = PR_PRIORITY_LAST;
    } else if ((PRIntn)newPri < (PRIntn)PR_PRIORITY_FIRST) {
        newPri = PR_PRIORITY_FIRST;
    }

    if ( _PR_IS_NATIVE_THREAD(thread) ) {
        thread->priority = newPri;
        _PR_MD_SET_PRIORITY(&(thread->md), newPri);
    } else _PR_SetThreadPriority(thread, newPri);
}

PR_IMPLEMENT(PRStatus) PR_SetCurrentThreadName(const char *name)
{
    PRThread *thread;
    size_t nameLen;

    if (!name) {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        return PR_FAILURE;
    }

    thread = PR_GetCurrentThread();
    if (!thread)
        return PR_FAILURE;

    PR_Free(thread->name);
    nameLen = strlen(name);
    thread->name = (char *)PR_Malloc(nameLen + 1);
    if (!thread->name)
        return PR_FAILURE;
    memcpy(thread->name, name, nameLen + 1);
    _PR_MD_SET_CURRENT_THREAD_NAME(thread->name);
    return PR_SUCCESS;
}

PR_IMPLEMENT(const char *) PR_GetThreadName(const PRThread *thread)
{
    if (!thread)
        return NULL;
    return thread->name;
}


/*
** This routine prevents all other threads from running. This call is needed by
** the garbage collector.
*/
PR_IMPLEMENT(void) PR_SuspendAll(void)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRCList *qp;

    /*
     * Stop all user and native threads which are marked GC able.
     */
    PR_Lock(_pr_activeLock);
    suspendAllOn = PR_TRUE;
    suspendAllThread = _PR_MD_CURRENT_THREAD();
    _PR_MD_BEGIN_SUSPEND_ALL();
    for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
         qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) {
        if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
            _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) {
            _PR_Suspend(_PR_ACTIVE_THREAD_PTR(qp));
            PR_ASSERT((_PR_ACTIVE_THREAD_PTR(qp))->state != _PR_RUNNING);
        }
    }
    for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
         qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) {
        if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
            _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp)))
            /* PR_Suspend(_PR_ACTIVE_THREAD_PTR(qp)); */
            _PR_MD_SUSPEND_THREAD(_PR_ACTIVE_THREAD_PTR(qp));
    }
    _PR_MD_END_SUSPEND_ALL();
}

/*
** This routine unblocks all other threads that were suspended from running by
** PR_SuspendAll(). This call is needed by the garbage collector.
*/
PR_IMPLEMENT(void) PR_ResumeAll(void)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRCList *qp;

    /*
     * Resume all user and native threads which are marked GC able.
     */
    _PR_MD_BEGIN_RESUME_ALL();
    for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
         qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) {
        if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
            _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp)))
            _PR_Resume(_PR_ACTIVE_THREAD_PTR(qp));
    }
    for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
         qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) {
        if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
            _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp)))
            _PR_MD_RESUME_THREAD(_PR_ACTIVE_THREAD_PTR(qp));
    }
    _PR_MD_END_RESUME_ALL();
    suspendAllThread = NULL;
    suspendAllOn = PR_FALSE;
    PR_Unlock(_pr_activeLock);
}
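
#if 0
/* Usage sketch (editor's note, not code from this file): the intended
 * stop-the-world pattern for a garbage collector. PR_SuspendAll() takes
 * _pr_activeLock and PR_ResumeAll() releases it, so the two calls must
 * always be paired on the same thread. */
PR_SuspendAll();
/* ... scan the stacks and registers of the GCable threads ... */
PR_ResumeAll();
#endif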

PR_IMPLEMENT(PRStatus) PR_EnumerateThreads(PREnumerator func, void *arg)
{
    PRCList *qp, *qp_next;
    PRIntn i = 0;
    PRStatus rv = PR_SUCCESS;
    PRThread* t;

    /*
    ** Currently, enumerating threads is done only with all other threads
    ** suspended and _pr_activeLock held
    */
    PR_ASSERT(suspendAllOn);

    /* Steve Morse, 4-23-97: Note that we can't walk a queue by taking
     * qp->next after applying the function "func". In particular, "func"
     * might remove the thread from the queue and put it into another one in
     * which case qp->next no longer points to the next entry in the original
     * queue.
     *
     * To get around this problem, we save qp->next in qp_next before applying
     * "func" and use that saved value as the next value after applying "func".
     */

    /*
     * Traverse the list of local and global threads
     */
    for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
         qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp_next)
    {
        qp_next = qp->next;
        t = _PR_ACTIVE_THREAD_PTR(qp);
        if (_PR_IS_GCABLE_THREAD(t))
        {
            rv = (*func)(t, i, arg);
            if (rv != PR_SUCCESS)
                return rv;
            i++;
        }
    }
    for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
         qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp_next)
    {
        qp_next = qp->next;
        t = _PR_ACTIVE_THREAD_PTR(qp);
        if (_PR_IS_GCABLE_THREAD(t))
        {
            rv = (*func)(t, i, arg);
            if (rv != PR_SUCCESS)
                return rv;
            i++;
        }
    }
    return rv;
}
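
#if 0
/* Usage sketch (editor's note, not code from this file): a PREnumerator
 * callback as invoked by the loops above -- once per GCable thread, with
 * i counting the threads visited. Only valid between PR_SuspendAll() and
 * PR_ResumeAll(). The callback name and counter are hypothetical. */
static PRStatus PR_CALLBACK count_thread(PRThread *t, int i, void *arg)
{
    (*(PRIntn *)arg)++;   /* just count the enumerated threads */
    return PR_SUCCESS;    /* returning anything else stops enumeration */
}

/* PRIntn n = 0;
 * PR_EnumerateThreads(count_thread, &n);  (with all threads suspended) */
#endif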

/* FUNCTION: _PR_AddSleepQ
** DESCRIPTION:
**    Adds a thread to the sleep/pauseQ.
** RESTRICTIONS:
**    Caller must have the RUNQ lock.
**    Caller must be a user level thread
*/
PR_IMPLEMENT(void)
_PR_AddSleepQ(PRThread *thread, PRIntervalTime timeout)
{
    _PRCPU *cpu = thread->cpu;

    if (timeout == PR_INTERVAL_NO_TIMEOUT) {
        /* append the thread to the global pause Q */
        PR_APPEND_LINK(&thread->links, &_PR_PAUSEQ(thread->cpu));
        thread->flags |= _PR_ON_PAUSEQ;
    } else {
        PRIntervalTime sleep;
        PRCList *q;
        PRThread *t;

        /* sort onto global sleepQ */
        sleep = timeout;

        /* Check if we are longest timeout */
        if (timeout >= _PR_SLEEPQMAX(cpu)) {
            PR_INSERT_BEFORE(&thread->links, &_PR_SLEEPQ(cpu));
            thread->sleep = timeout - _PR_SLEEPQMAX(cpu);
            _PR_SLEEPQMAX(cpu) = timeout;
        } else {
            /* Sort thread into global sleepQ at appropriate point */
            q = _PR_SLEEPQ(cpu).next;

            /* Now scan the list for where to insert this entry */
            while (q != &_PR_SLEEPQ(cpu)) {
                t = _PR_THREAD_PTR(q);
                if (sleep < t->sleep) {
                    /* Found sleeper to insert in front of */
                    break;
                }
                sleep -= t->sleep;
                q = q->next;
            }
            thread->sleep = sleep;
            PR_INSERT_BEFORE(&thread->links, q);

            /*
            ** Subtract our sleep time from the sleeper that follows us (there
            ** must be one) so that they remain relative to us.
            */
            PR_ASSERT (thread->links.next != &_PR_SLEEPQ(cpu));

            t = _PR_THREAD_PTR(thread->links.next);
            PR_ASSERT(_PR_THREAD_PTR(t->links.prev) == thread);
            t->sleep -= sleep;
        }

        thread->flags |= _PR_ON_SLEEPQ;
    }
}
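
/*
** Worked example (editor's note): the sleepQ stores per-thread deltas,
** not absolute timeouts. If threads with timeouts of 5, 12 and 20 ticks
** are enqueued in that order, the queue holds sleep values 5, 7 and 8,
** and _PR_SLEEPQMAX is 20. Inserting a fourth thread with timeout 10
** walks past the 5 (leaving 10 - 5 = 5), stops in front of the 7, stores
** sleep = 5, and reduces the following entry's delta from 7 to 2.
*/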

/* FUNCTION: _PR_DelSleepQ
** DESCRIPTION:
**    Removes a thread from the sleep/pauseQ.
** INPUTS:
**    If propogate_time is true, then the thread following the deleted
**    thread will get the time from the deleted thread. This is used
**    when deleting a sleeper that has not timed out.
** RESTRICTIONS:
**    Caller must have the RUNQ lock.
**    Caller must be a user level thread
*/
PR_IMPLEMENT(void)
_PR_DelSleepQ(PRThread *thread, PRBool propogate_time)
{
    _PRCPU *cpu = thread->cpu;

    /* Remove from pauseQ/sleepQ */
    if (thread->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
        if (thread->flags & _PR_ON_SLEEPQ) {
            PRCList *q = thread->links.next;
            if (q != &_PR_SLEEPQ(cpu)) {
                if (propogate_time == PR_TRUE) {
                    PRThread *after = _PR_THREAD_PTR(q);
                    after->sleep += thread->sleep;
                } else
                    _PR_SLEEPQMAX(cpu) -= thread->sleep;
            } else {
                /* Check if prev is the beginning of the list; if so,
                 * we are the only element on the list.
                 */
                if (thread->links.prev != &_PR_SLEEPQ(cpu))
                    _PR_SLEEPQMAX(cpu) -= thread->sleep;
                else
                    _PR_SLEEPQMAX(cpu) = 0;
            }
            thread->flags &= ~_PR_ON_SLEEPQ;
        } else {
            thread->flags &= ~_PR_ON_PAUSEQ;
        }
        PR_REMOVE_LINK(&thread->links);
    } else
        PR_ASSERT(0);
}

void
_PR_AddThreadToRunQ(
    PRThread *me,     /* the current thread */
    PRThread *thread) /* the local thread to be added to a run queue */
{
    PRThreadPriority pri = thread->priority;
    _PRCPU *cpu = thread->cpu;

    PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));

#if defined(WINNT)
    /*
     * On NT, we can only reliably know that the current CPU
     * is not idle. We add the awakened thread to the run
     * queue of its CPU if its CPU is the current CPU.
     * For any other CPU, we don't really know whether it
     * is busy or idle. So in all other cases, we just
     * "post" the awakened thread to the IO completion port
     * for the next idle CPU to execute (this is done in
     * _PR_MD_WAKEUP_WAITER).
     * Threads with a suspended I/O operation remain bound to
     * the same cpu until I/O is cancelled
     *
     * NOTE: the boolean expression below must be the exact
     * opposite of the corresponding boolean expression in
     * _PR_MD_WAKEUP_WAITER.
     */
    if ((!_PR_IS_NATIVE_THREAD(me) && (cpu == me->cpu)) ||
        (thread->md.thr_bound_cpu)) {
        PR_ASSERT(!thread->md.thr_bound_cpu ||
                  (thread->md.thr_bound_cpu == cpu));
        _PR_RUNQ_LOCK(cpu);
        _PR_ADD_RUNQ(thread, cpu, pri);
        _PR_RUNQ_UNLOCK(cpu);
    }
#else
    _PR_RUNQ_LOCK(cpu);
    _PR_ADD_RUNQ(thread, cpu, pri);
    _PR_RUNQ_UNLOCK(cpu);
    if (!_PR_IS_NATIVE_THREAD(me) && (cpu == me->cpu)) {
        if (pri > me->priority) {
            _PR_SET_RESCHED_FLAG();
        }
    }
#endif
}