Mercurial > trustbridge > nss-cmake-static
comparison nspr/pr/src/md/unix/unix.c @ 0:1e5118fa0cb1
This is NSS with a CMake buildsystem.
To compile a static NSS library for Windows we've used the
Chromium-NSS fork and added a Cmake buildsystem to compile
it statically for Windows. See README.chromium for chromium
changes and README.trustbridge for our modifications.
author | Andre Heinecke <andre.heinecke@intevation.de> |
---|---|
date | Mon, 28 Jul 2014 10:47:06 +0200 |
parents | |
children |
comparison
equal
deleted
inserted
replaced
-1:000000000000 | 0:1e5118fa0cb1 |
---|---|
1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ | |
2 /* This Source Code Form is subject to the terms of the Mozilla Public | |
3 * License, v. 2.0. If a copy of the MPL was not distributed with this | |
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ | |
5 | |
6 #include "primpl.h" | |
7 | |
8 #include <string.h> | |
9 #include <signal.h> | |
10 #include <unistd.h> | |
11 #include <fcntl.h> | |
12 #include <sys/types.h> | |
13 #include <sys/socket.h> | |
14 #include <sys/time.h> | |
15 #include <sys/ioctl.h> | |
16 #include <sys/mman.h> | |
17 #include <unistd.h> | |
18 #include <sys/utsname.h> | |
19 | |
20 #ifdef _PR_POLL_AVAILABLE | |
21 #include <poll.h> | |
22 #endif | |
23 | |
24 /* To get FIONREAD */ | |
25 #if defined(UNIXWARE) | |
26 #include <sys/filio.h> | |
27 #endif | |
28 | |
29 #if defined(NTO) | |
30 #include <sys/statvfs.h> | |
31 #endif | |
32 | |
/*
 * Make sure _PRSockLen_t is 32-bit, because we will cast a PRUint32* or
 * PRInt32* pointer to a _PRSockLen_t* pointer.
 */
#if defined(HAVE_SOCKLEN_T) \
    || (defined(__GLIBC__) && __GLIBC__ >= 2)
#define _PRSockLen_t socklen_t
#elif defined(IRIX) || defined(HPUX) || defined(OSF1) || defined(SOLARIS) \
    || defined(AIX4_1) || defined(LINUX) \
    || defined(BSDI) || defined(SCO) \
    || defined(DARWIN) \
    || defined(QNX)
#define _PRSockLen_t int
#elif (defined(AIX) && !defined(AIX4_1)) || defined(FREEBSD) \
    || defined(NETBSD) || defined(OPENBSD) || defined(UNIXWARE) \
    || defined(DGUX) || defined(NTO) || defined(RISCOS)
#define _PRSockLen_t size_t
#else
#error "Cannot determine architecture"
#endif

/*
** Global lock variable used to bracket calls into rusty libraries that
** aren't thread safe (like libc, libX, etc).
*/
static PRLock *_pr_rename_lock = NULL;
static PRMonitor *_pr_Xfe_mon = NULL;

static PRInt64 minus_one;

sigset_t timer_set;

/* The remainder of this file's globals are for the classic (non-pthreads)
 * threading model only. */
#if !defined(_PR_PTHREADS)

static sigset_t empty_set;

#ifdef SOLARIS
#include <sys/file.h>
#include <sys/filio.h>
#endif

/* Fallback if the platform headers do not define PIPE_BUF. */
#ifndef PIPE_BUF
#define PIPE_BUF 512
#endif

/*
 * _nspr_noclock - if set clock interrupts are disabled
 */
int _nspr_noclock = 1;

#ifdef IRIX
extern PRInt32 _nspr_terminate_on_error;
#endif

/*
 * There is an assertion in this code that NSPR's definition of PRIOVec
 * is bit compatible with UNIX' definition of a struct iovec. This is
 * applicable to the 'writev()' operations where the types are casually
 * cast to avoid warnings.
 */

/* Self-pipe used to wake the idle thread; buffer drains its contents. */
int _pr_md_pipefd[2] = { -1, -1 };
static char _pr_md_pipebuf[PIPE_BUF];
/* Forward declaration: wait helper for local (user-level) threads. */
static PRInt32 local_io_wait(PRInt32 osfd, PRInt32 wait_flag,
    PRIntervalTime timeout);

/* Interrupt dispatch table; zero entry terminates the list. */
_PRInterruptTable _pr_interruptTable[] = {
    {
        "clock", _PR_MISSED_CLOCK, _PR_ClockInterrupt, },
    {
        0 }
};
105 | |
/*
 * Initialize the per-CPU unix MD state: an empty I/O queue, no
 * registered fd (-1), and no pending queue timeout.
 */
void _MD_unix_init_running_cpu(_PRCPU *cpu)
{
    PR_INIT_CLIST(&(cpu->md.md_unix.ioQ));
    cpu->md.md_unix.ioq_max_osfd = -1;
    cpu->md.md_unix.ioq_timeout = PR_INTERVAL_NO_TIMEOUT;
}
112 | |
113 PRStatus _MD_open_dir(_MDDir *d, const char *name) | |
114 { | |
115 int err; | |
116 | |
117 d->d = opendir(name); | |
118 if (!d->d) { | |
119 err = _MD_ERRNO(); | |
120 _PR_MD_MAP_OPENDIR_ERROR(err); | |
121 return PR_FAILURE; | |
122 } | |
123 return PR_SUCCESS; | |
124 } | |
125 | |
126 PRInt32 _MD_close_dir(_MDDir *d) | |
127 { | |
128 int rv = 0, err; | |
129 | |
130 if (d->d) { | |
131 rv = closedir(d->d); | |
132 if (rv == -1) { | |
133 err = _MD_ERRNO(); | |
134 _PR_MD_MAP_CLOSEDIR_ERROR(err); | |
135 } | |
136 } | |
137 return rv; | |
138 } | |
139 | |
140 char * _MD_read_dir(_MDDir *d, PRIntn flags) | |
141 { | |
142 struct dirent *de; | |
143 int err; | |
144 | |
145 for (;;) { | |
146 /* | |
147 * XXX: readdir() is not MT-safe. There is an MT-safe version | |
148 * readdir_r() on some systems. | |
149 */ | |
150 _MD_ERRNO() = 0; | |
151 de = readdir(d->d); | |
152 if (!de) { | |
153 err = _MD_ERRNO(); | |
154 _PR_MD_MAP_READDIR_ERROR(err); | |
155 return 0; | |
156 } | |
157 if ((flags & PR_SKIP_DOT) && | |
158 (de->d_name[0] == '.') && (de->d_name[1] == 0)) | |
159 continue; | |
160 if ((flags & PR_SKIP_DOT_DOT) && | |
161 (de->d_name[0] == '.') && (de->d_name[1] == '.') && | |
162 (de->d_name[2] == 0)) | |
163 continue; | |
164 if ((flags & PR_SKIP_HIDDEN) && (de->d_name[0] == '.')) | |
165 continue; | |
166 break; | |
167 } | |
168 return de->d_name; | |
169 } | |
170 | |
/*
 * Remove (unlink) the named file.  Returns unlink()'s result; on
 * failure the NSPR error is mapped from errno.
 *
 * On UnixWare all signals are blocked for the duration of the
 * unlink() call and the previous mask is restored afterwards
 * (platform workaround; exact motivation not visible here).
 */
PRInt32 _MD_delete(const char *name)
{
    PRInt32 rv, err;
#ifdef UNIXWARE
    sigset_t set, oset;
#endif

#ifdef UNIXWARE
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, &oset);
#endif
    rv = unlink(name);
#ifdef UNIXWARE
    sigprocmask(SIG_SETMASK, &oset, NULL);
#endif
    if (rv == -1) {
        err = _MD_ERRNO();
        _PR_MD_MAP_UNLINK_ERROR(err);
    }
    return(rv);
}
192 | |
/*
 * Rename 'from' to 'to', refusing to overwrite an existing target
 * (Windows rename semantics).  Returns 0 on success, -1 on failure
 * with the NSPR error set (PR_FILE_EXISTS_ERROR if 'to' exists).
 *
 * NOTE(review): the access()/rename() pair is inherently racy
 * against other processes; the rename lock only serializes threads
 * within this process, as the original comment concedes.
 */
PRInt32 _MD_rename(const char *from, const char *to)
{
    PRInt32 rv = -1, err;

    /*
    ** This is trying to enforce the semantics of WINDOZE' rename
    ** operation. That means one is not allowed to rename over top
    ** of an existing file. Holding a lock across these two function
    ** and the open function is known to be a bad idea, but ....
    */
    if (NULL != _pr_rename_lock)
        PR_Lock(_pr_rename_lock);
    if (0 == access(to, F_OK))
        PR_SetError(PR_FILE_EXISTS_ERROR, 0);
    else
    {
        rv = rename(from, to);
        if (rv < 0) {
            err = _MD_ERRNO();
            _PR_MD_MAP_RENAME_ERROR(err);
        }
    }
    if (NULL != _pr_rename_lock)
        PR_Unlock(_pr_rename_lock);
    return rv;
}
219 | |
220 PRInt32 _MD_access(const char *name, PRAccessHow how) | |
221 { | |
222 PRInt32 rv, err; | |
223 int amode; | |
224 | |
225 switch (how) { | |
226 case PR_ACCESS_WRITE_OK: | |
227 amode = W_OK; | |
228 break; | |
229 case PR_ACCESS_READ_OK: | |
230 amode = R_OK; | |
231 break; | |
232 case PR_ACCESS_EXISTS: | |
233 amode = F_OK; | |
234 break; | |
235 default: | |
236 PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); | |
237 rv = -1; | |
238 goto done; | |
239 } | |
240 rv = access(name, amode); | |
241 | |
242 if (rv < 0) { | |
243 err = _MD_ERRNO(); | |
244 _PR_MD_MAP_ACCESS_ERROR(err); | |
245 } | |
246 | |
247 done: | |
248 return(rv); | |
249 } | |
250 | |
251 PRInt32 _MD_mkdir(const char *name, PRIntn mode) | |
252 { | |
253 int rv, err; | |
254 | |
255 /* | |
256 ** This lock is used to enforce rename semantics as described | |
257 ** in PR_Rename. Look there for more fun details. | |
258 */ | |
259 if (NULL !=_pr_rename_lock) | |
260 PR_Lock(_pr_rename_lock); | |
261 rv = mkdir(name, mode); | |
262 if (rv < 0) { | |
263 err = _MD_ERRNO(); | |
264 _PR_MD_MAP_MKDIR_ERROR(err); | |
265 } | |
266 if (NULL !=_pr_rename_lock) | |
267 PR_Unlock(_pr_rename_lock); | |
268 return rv; | |
269 } | |
270 | |
271 PRInt32 _MD_rmdir(const char *name) | |
272 { | |
273 int rv, err; | |
274 | |
275 rv = rmdir(name); | |
276 if (rv == -1) { | |
277 err = _MD_ERRNO(); | |
278 _PR_MD_MAP_RMDIR_ERROR(err); | |
279 } | |
280 return rv; | |
281 } | |
282 | |
/*
 * Read up to 'amount' bytes from fd into buf.
 *
 * The underlying OS fd is expected to be nonblocking; when read()
 * reports EAGAIN/EWOULDBLOCK on a blocking NSPR descriptor, the
 * caller waits for readability: local (user-level) threads park in
 * local_io_wait(), native threads block directly in select()/poll()
 * with no timeout.  EINTR with no pending NSPR interrupt retries.
 *
 * Returns the byte count from read(), or -1 with the NSPR error set
 * (PR_PENDING_INTERRUPT_ERROR if the thread was interrupted).
 */
PRInt32 _MD_read(PRFileDesc *fd, void *buf, PRInt32 amount)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRInt32 rv, err;
#ifndef _PR_USE_POLL
    fd_set rd;
#else
    struct pollfd pfd;
#endif /* _PR_USE_POLL */
    PRInt32 osfd = fd->secret->md.osfd;

#ifndef _PR_USE_POLL
    FD_ZERO(&rd);
    FD_SET(osfd, &rd);
#else
    pfd.fd = osfd;
    pfd.events = POLLIN;
#endif /* _PR_USE_POLL */
    while ((rv = read(osfd,buf,amount)) == -1) {
        err = _MD_ERRNO();
        if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
            /* Nonblocking NSPR fd: surface would-block to the caller. */
            if (fd->secret->nonblocking) {
                break;
            }
            if (!_PR_IS_NATIVE_THREAD(me)) {
                if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ,
                    PR_INTERVAL_NO_TIMEOUT)) < 0)
                    goto done;
            } else {
#ifndef _PR_USE_POLL
                while ((rv = _MD_SELECT(osfd + 1, &rd, NULL, NULL, NULL))
                    == -1 && (err = _MD_ERRNO()) == EINTR) {
                    /* retry _MD_SELECT() if it is interrupted */
                }
#else /* _PR_USE_POLL */
                while ((rv = _MD_POLL(&pfd, 1, -1))
                    == -1 && (err = _MD_ERRNO()) == EINTR) {
                    /* retry _MD_POLL() if it is interrupted */
                }
#endif /* _PR_USE_POLL */
                if (rv == -1) {
                    break;
                }
            }
            if (_PR_PENDING_INTERRUPT(me))
                break;
        } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
            continue;
        } else {
            break;
        }
    }
    if (rv < 0) {
        if (_PR_PENDING_INTERRUPT(me)) {
            /* Consume the interrupt flag and report it as the error. */
            me->flags &= ~_PR_INTERRUPT;
            PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        } else {
            _PR_MD_MAP_READ_ERROR(err);
        }
    }
done:
    return(rv);
}
346 | |
/*
 * Write up to 'amount' bytes from buf to fd.
 *
 * Mirror image of _MD_read(): on EAGAIN/EWOULDBLOCK a blocking NSPR
 * descriptor waits for writability — local threads via
 * local_io_wait(), native threads via select()/poll() with no
 * timeout.  EINTR with no pending NSPR interrupt retries the write.
 *
 * Returns the byte count from write(), or -1 with the NSPR error
 * set (PR_PENDING_INTERRUPT_ERROR if the thread was interrupted).
 */
PRInt32 _MD_write(PRFileDesc *fd, const void *buf, PRInt32 amount)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRInt32 rv, err;
#ifndef _PR_USE_POLL
    fd_set wd;
#else
    struct pollfd pfd;
#endif /* _PR_USE_POLL */
    PRInt32 osfd = fd->secret->md.osfd;

#ifndef _PR_USE_POLL
    FD_ZERO(&wd);
    FD_SET(osfd, &wd);
#else
    pfd.fd = osfd;
    pfd.events = POLLOUT;
#endif /* _PR_USE_POLL */
    while ((rv = write(osfd,buf,amount)) == -1) {
        err = _MD_ERRNO();
        if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
            /* Nonblocking NSPR fd: surface would-block to the caller. */
            if (fd->secret->nonblocking) {
                break;
            }
            if (!_PR_IS_NATIVE_THREAD(me)) {
                if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE,
                    PR_INTERVAL_NO_TIMEOUT)) < 0)
                    goto done;
            } else {
#ifndef _PR_USE_POLL
                while ((rv = _MD_SELECT(osfd + 1, NULL, &wd, NULL, NULL))
                    == -1 && (err = _MD_ERRNO()) == EINTR) {
                    /* retry _MD_SELECT() if it is interrupted */
                }
#else /* _PR_USE_POLL */
                while ((rv = _MD_POLL(&pfd, 1, -1))
                    == -1 && (err = _MD_ERRNO()) == EINTR) {
                    /* retry _MD_POLL() if it is interrupted */
                }
#endif /* _PR_USE_POLL */
                if (rv == -1) {
                    break;
                }
            }
            if (_PR_PENDING_INTERRUPT(me))
                break;
        } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
            continue;
        } else {
            break;
        }
    }
    if (rv < 0) {
        if (_PR_PENDING_INTERRUPT(me)) {
            /* Consume the interrupt flag and report it as the error. */
            me->flags &= ~_PR_INTERRUPT;
            PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        } else {
            _PR_MD_MAP_WRITE_ERROR(err);
        }
    }
done:
    return(rv);
}
410 | |
411 PRInt32 _MD_fsync(PRFileDesc *fd) | |
412 { | |
413 PRInt32 rv, err; | |
414 | |
415 rv = fsync(fd->secret->md.osfd); | |
416 if (rv == -1) { | |
417 err = _MD_ERRNO(); | |
418 _PR_MD_MAP_FSYNC_ERROR(err); | |
419 } | |
420 return(rv); | |
421 } | |
422 | |
423 PRInt32 _MD_close(PRInt32 osfd) | |
424 { | |
425 PRInt32 rv, err; | |
426 | |
427 rv = close(osfd); | |
428 if (rv == -1) { | |
429 err = _MD_ERRNO(); | |
430 _PR_MD_MAP_CLOSE_ERROR(err); | |
431 } | |
432 return(rv); | |
433 } | |
434 | |
435 PRInt32 _MD_socket(PRInt32 domain, PRInt32 type, PRInt32 proto) | |
436 { | |
437 PRInt32 osfd, err; | |
438 | |
439 osfd = socket(domain, type, proto); | |
440 | |
441 if (osfd == -1) { | |
442 err = _MD_ERRNO(); | |
443 _PR_MD_MAP_SOCKET_ERROR(err); | |
444 return(osfd); | |
445 } | |
446 | |
447 return(osfd); | |
448 } | |
449 | |
/*
 * Number of bytes immediately readable on the socket, obtained via
 * the FIONREAD ioctl.  Returns -1 with the NSPR error mapped from
 * errno if the ioctl fails.
 */
PRInt32 _MD_socketavailable(PRFileDesc *fd)
{
    PRInt32 result;

    if (ioctl(fd->secret->md.osfd, FIONREAD, &result) < 0) {
        _PR_MD_MAP_SOCKETAVAILABLE_ERROR(_MD_ERRNO());
        return -1;
    }
    return result;
}
460 | |
/*
 * 64-bit wrapper: widens _MD_socketavailable()'s 32-bit result
 * (including its -1 error value) via the LL_I2L conversion macro.
 */
PRInt64 _MD_socketavailable64(PRFileDesc *fd)
{
    PRInt64 result;
    LL_I2L(result, _MD_socketavailable(fd));
    return result;
}  /* _MD_socketavailable64 */
467 | |
/* Values for socket_io_wait()'s fd_type argument: wait for
 * readability vs. writability. */
#define READ_FD 1
#define WRITE_FD 2
470 | |
471 /* | |
472 * socket_io_wait -- | |
473 * | |
474 * wait for socket i/o, periodically checking for interrupt | |
475 * | |
476 * The first implementation uses select(), for platforms without | |
477 * poll(). The second (preferred) implementation uses poll(). | |
478 */ | |
479 | |
480 #ifndef _PR_USE_POLL | |
481 | |
/*
 * select()-based socket_io_wait: block the native thread until osfd
 * is ready for the requested direction (READ_FD/WRITE_FD) or the
 * timeout expires, waking at least every
 * _PR_INTERRUPT_CHECK_INTERVAL_SECS to check the NSPR interrupt bit.
 *
 * Returns select()'s positive ready count, or -1 with the NSPR
 * error set (PR_IO_TIMEOUT_ERROR or PR_PENDING_INTERRUPT_ERROR).
 */
static PRInt32 socket_io_wait(PRInt32 osfd, PRInt32 fd_type,
    PRIntervalTime timeout)
{
    PRInt32 rv = -1;
    struct timeval tv;
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRIntervalTime epoch, now, elapsed, remaining;
    PRBool wait_for_remaining;
    PRInt32 syserror;
    fd_set rd_wr;

    switch (timeout) {
        case PR_INTERVAL_NO_WAIT:
            /* Caller asked for no waiting: report timeout immediately. */
            PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
            break;
        case PR_INTERVAL_NO_TIMEOUT:
            /*
             * This is a special case of the 'default' case below.
             * Please see the comments there.
             */
            tv.tv_sec = _PR_INTERRUPT_CHECK_INTERVAL_SECS;
            tv.tv_usec = 0;
            FD_ZERO(&rd_wr);
            do {
                /* select() may clobber the set, so re-arm each pass. */
                FD_SET(osfd, &rd_wr);
                if (fd_type == READ_FD)
                    rv = _MD_SELECT(osfd + 1, &rd_wr, NULL, NULL, &tv);
                else
                    rv = _MD_SELECT(osfd + 1, NULL, &rd_wr, NULL, &tv);
                if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) {
                    _PR_MD_MAP_SELECT_ERROR(syserror);
                    break;
                }
                if (_PR_PENDING_INTERRUPT(me)) {
                    me->flags &= ~_PR_INTERRUPT;
                    PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
                    rv = -1;
                    break;
                }
            } while (rv == 0 || (rv == -1 && syserror == EINTR));
            break;
        default:
            now = epoch = PR_IntervalNow();
            remaining = timeout;
            FD_ZERO(&rd_wr);
            do {
                /*
                 * We block in _MD_SELECT for at most
                 * _PR_INTERRUPT_CHECK_INTERVAL_SECS seconds,
                 * so that there is an upper limit on the delay
                 * before the interrupt bit is checked.
                 */
                wait_for_remaining = PR_TRUE;
                tv.tv_sec = PR_IntervalToSeconds(remaining);
                if (tv.tv_sec > _PR_INTERRUPT_CHECK_INTERVAL_SECS) {
                    wait_for_remaining = PR_FALSE;
                    tv.tv_sec = _PR_INTERRUPT_CHECK_INTERVAL_SECS;
                    tv.tv_usec = 0;
                } else {
                    tv.tv_usec = PR_IntervalToMicroseconds(
                        remaining -
                        PR_SecondsToInterval(tv.tv_sec));
                }
                FD_SET(osfd, &rd_wr);
                if (fd_type == READ_FD)
                    rv = _MD_SELECT(osfd + 1, &rd_wr, NULL, NULL, &tv);
                else
                    rv = _MD_SELECT(osfd + 1, NULL, &rd_wr, NULL, &tv);
                /*
                 * we don't consider EINTR a real error
                 */
                if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) {
                    _PR_MD_MAP_SELECT_ERROR(syserror);
                    break;
                }
                if (_PR_PENDING_INTERRUPT(me)) {
                    me->flags &= ~_PR_INTERRUPT;
                    PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
                    rv = -1;
                    break;
                }
                /*
                 * We loop again if _MD_SELECT timed out or got interrupted
                 * by a signal, and the timeout deadline has not passed yet.
                 */
                if (rv == 0 || (rv == -1 && syserror == EINTR)) {
                    /*
                     * If _MD_SELECT timed out, we know how much time
                     * we spent in blocking, so we can avoid a
                     * PR_IntervalNow() call.
                     */
                    if (rv == 0) {
                        if (wait_for_remaining) {
                            now += remaining;
                        } else {
                            now += PR_SecondsToInterval(tv.tv_sec)
                                + PR_MicrosecondsToInterval(tv.tv_usec);
                        }
                    } else {
                        now = PR_IntervalNow();
                    }
                    elapsed = (PRIntervalTime) (now - epoch);
                    if (elapsed >= timeout) {
                        PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
                        rv = -1;
                        break;
                    } else {
                        remaining = timeout - elapsed;
                    }
                }
            } while (rv == 0 || (rv == -1 && syserror == EINTR));
            break;
    }
    return(rv);
}
597 | |
598 #else /* _PR_USE_POLL */ | |
599 | |
/*
 * poll()-based socket_io_wait (preferred variant): block the native
 * thread until osfd is ready for the requested direction
 * (READ_FD/WRITE_FD) or the timeout expires, waking at least every
 * _PR_INTERRUPT_CHECK_INTERVAL_SECS to check the NSPR interrupt bit.
 * POLLHUP/POLLNVAL in revents are mapped to an NSPR error; POLLERR
 * alone is deliberately not treated as failure (see inline comment).
 *
 * Returns poll()'s positive ready count, or -1 with the NSPR error
 * set (PR_IO_TIMEOUT_ERROR or PR_PENDING_INTERRUPT_ERROR).
 */
static PRInt32 socket_io_wait(PRInt32 osfd, PRInt32 fd_type,
    PRIntervalTime timeout)
{
    PRInt32 rv = -1;
    int msecs;
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRIntervalTime epoch, now, elapsed, remaining;
    PRBool wait_for_remaining;
    PRInt32 syserror;
    struct pollfd pfd;

    switch (timeout) {
        case PR_INTERVAL_NO_WAIT:
            /* Caller asked for no waiting: report timeout immediately. */
            PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
            break;
        case PR_INTERVAL_NO_TIMEOUT:
            /*
             * This is a special case of the 'default' case below.
             * Please see the comments there.
             */
            msecs = _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000;
            pfd.fd = osfd;
            if (fd_type == READ_FD) {
                pfd.events = POLLIN;
            } else {
                pfd.events = POLLOUT;
            }
            do {
                rv = _MD_POLL(&pfd, 1, msecs);
                if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) {
                    _PR_MD_MAP_POLL_ERROR(syserror);
                    break;
                }
                /*
                 * If POLLERR is set, don't process it; retry the operation
                 */
                if ((rv == 1) && (pfd.revents & (POLLHUP | POLLNVAL))) {
                    rv = -1;
                    _PR_MD_MAP_POLL_REVENTS_ERROR(pfd.revents);
                    break;
                }
                if (_PR_PENDING_INTERRUPT(me)) {
                    me->flags &= ~_PR_INTERRUPT;
                    PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
                    rv = -1;
                    break;
                }
            } while (rv == 0 || (rv == -1 && syserror == EINTR));
            break;
        default:
            now = epoch = PR_IntervalNow();
            remaining = timeout;
            pfd.fd = osfd;
            if (fd_type == READ_FD) {
                pfd.events = POLLIN;
            } else {
                pfd.events = POLLOUT;
            }
            do {
                /*
                 * We block in _MD_POLL for at most
                 * _PR_INTERRUPT_CHECK_INTERVAL_SECS seconds,
                 * so that there is an upper limit on the delay
                 * before the interrupt bit is checked.
                 */
                wait_for_remaining = PR_TRUE;
                msecs = PR_IntervalToMilliseconds(remaining);
                if (msecs > _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000) {
                    wait_for_remaining = PR_FALSE;
                    msecs = _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000;
                }
                rv = _MD_POLL(&pfd, 1, msecs);
                /*
                 * we don't consider EINTR a real error
                 */
                if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) {
                    _PR_MD_MAP_POLL_ERROR(syserror);
                    break;
                }
                if (_PR_PENDING_INTERRUPT(me)) {
                    me->flags &= ~_PR_INTERRUPT;
                    PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
                    rv = -1;
                    break;
                }
                /*
                 * If POLLERR is set, don't process it; retry the operation
                 */
                if ((rv == 1) && (pfd.revents & (POLLHUP | POLLNVAL))) {
                    rv = -1;
                    _PR_MD_MAP_POLL_REVENTS_ERROR(pfd.revents);
                    break;
                }
                /*
                 * We loop again if _MD_POLL timed out or got interrupted
                 * by a signal, and the timeout deadline has not passed yet.
                 */
                if (rv == 0 || (rv == -1 && syserror == EINTR)) {
                    /*
                     * If _MD_POLL timed out, we know how much time
                     * we spent in blocking, so we can avoid a
                     * PR_IntervalNow() call.
                     */
                    if (rv == 0) {
                        if (wait_for_remaining) {
                            now += remaining;
                        } else {
                            now += PR_MillisecondsToInterval(msecs);
                        }
                    } else {
                        now = PR_IntervalNow();
                    }
                    elapsed = (PRIntervalTime) (now - epoch);
                    if (elapsed >= timeout) {
                        PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
                        rv = -1;
                        break;
                    } else {
                        remaining = timeout - elapsed;
                    }
                }
            } while (rv == 0 || (rv == -1 && syserror == EINTR));
            break;
    }
    return(rv);
}
726 | |
727 #endif /* _PR_USE_POLL */ | |
728 | |
729 static PRInt32 local_io_wait( | |
730 PRInt32 osfd, | |
731 PRInt32 wait_flag, | |
732 PRIntervalTime timeout) | |
733 { | |
734 _PRUnixPollDesc pd; | |
735 PRInt32 rv; | |
736 | |
737 PR_LOG(_pr_io_lm, PR_LOG_MIN, | |
738 ("waiting to %s on osfd=%d", | |
739 (wait_flag == _PR_UNIX_POLL_READ) ? "read" : "write", | |
740 osfd)); | |
741 | |
742 if (timeout == PR_INTERVAL_NO_WAIT) return 0; | |
743 | |
744 pd.osfd = osfd; | |
745 pd.in_flags = wait_flag; | |
746 pd.out_flags = 0; | |
747 | |
748 rv = _PR_WaitForMultipleFDs(&pd, 1, timeout); | |
749 | |
750 if (rv == 0) { | |
751 PR_SetError(PR_IO_TIMEOUT_ERROR, 0); | |
752 rv = -1; | |
753 } | |
754 return rv; | |
755 } | |
756 | |
757 | |
/*
 * Receive up to 'amount' bytes from the socket, waiting up to
 * 'timeout' for readability on a blocking NSPR descriptor (local
 * threads via local_io_wait(), native via socket_io_wait()).
 * Returns the byte count, or -1 with the NSPR error set.
 */
PRInt32 _MD_recv(PRFileDesc *fd, void *buf, PRInt32 amount,
    PRInt32 flags, PRIntervalTime timeout)
{
    PRInt32 osfd = fd->secret->md.osfd;
    PRInt32 rv, err;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    /*
     * Many OS's (Solaris, Unixware) have a broken recv which won't read
     * from socketpairs. As long as we don't use flags on socketpairs, this
     * is a decent fix. - mikep
     */
#if defined(UNIXWARE) || defined(SOLARIS)
    while ((rv = read(osfd,buf,amount)) == -1) {
#else
    while ((rv = recv(osfd,buf,amount,flags)) == -1) {
#endif
        err = _MD_ERRNO();
        if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
            /* Nonblocking NSPR fd: surface would-block to the caller. */
            if (fd->secret->nonblocking) {
                break;
            }
            if (!_PR_IS_NATIVE_THREAD(me)) {
                if ((rv = local_io_wait(osfd,_PR_UNIX_POLL_READ,timeout)) < 0)
                    goto done;
            } else {
                if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0)
                    goto done;
            }
        } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
            continue;
        } else {
            break;
        }
    }
    if (rv < 0) {
        _PR_MD_MAP_RECV_ERROR(err);
    }
done:
    return(rv);
}
799 | |
/*
 * recvfrom() with NSPR blocking semantics: fill buf and the sender
 * address, waiting up to 'timeout' for readability on a blocking
 * descriptor.  *addrlen is re-primed to the full PRNetAddr size
 * before every retry, since recvfrom() rewrites it.  Returns the
 * byte count, or -1 with the NSPR error set.
 */
PRInt32 _MD_recvfrom(PRFileDesc *fd, void *buf, PRInt32 amount,
    PRIntn flags, PRNetAddr *addr, PRUint32 *addrlen,
    PRIntervalTime timeout)
{
    PRInt32 osfd = fd->secret->md.osfd;
    PRInt32 rv, err;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    while ((*addrlen = PR_NETADDR_SIZE(addr)),
        ((rv = recvfrom(osfd, buf, amount, flags,
        (struct sockaddr *) addr, (_PRSockLen_t *)addrlen)) == -1)) {
        err = _MD_ERRNO();
        if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
            /* Nonblocking NSPR fd: surface would-block to the caller. */
            if (fd->secret->nonblocking) {
                break;
            }
            if (!_PR_IS_NATIVE_THREAD(me)) {
                if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ, timeout)) < 0)
                    goto done;
            } else {
                if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0)
                    goto done;
            }
        } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
            continue;
        } else {
            break;
        }
    }
    if (rv < 0) {
        _PR_MD_MAP_RECVFROM_ERROR(err);
    }
done:
#ifdef _PR_HAVE_SOCKADDR_LEN
    if (rv != -1) {
        /* ignore the sa_len field of struct sockaddr */
        if (addr) {
            addr->raw.family = ((struct sockaddr *) addr)->sa_family;
        }
    }
#endif /* _PR_HAVE_SOCKADDR_LEN */
    return(rv);
}
843 | |
/*
 * Send up to 'amount' bytes on the socket with NSPR blocking
 * semantics.  On Solaris, write() is used instead of send() (see
 * comment below), with an ERANGE workaround that halves the chunk
 * size and retries.  After a short (partial) send on a blocking fd,
 * the function waits for writability before returning so the
 * caller's next send is less likely to hit EWOULDBLOCK.  Returns
 * the byte count, or -1 with the NSPR error set.
 */
PRInt32 _MD_send(PRFileDesc *fd, const void *buf, PRInt32 amount,
    PRInt32 flags, PRIntervalTime timeout)
{
    PRInt32 osfd = fd->secret->md.osfd;
    PRInt32 rv, err;
    PRThread *me = _PR_MD_CURRENT_THREAD();
#if defined(SOLARIS)
    PRInt32 tmp_amount = amount;
#endif

    /*
     * On pre-2.6 Solaris, send() is much slower than write().
     * On 2.6 and beyond, with in-kernel sockets, send() and
     * write() are fairly equivalent in performance.
     */
#if defined(SOLARIS)
    PR_ASSERT(0 == flags);
    while ((rv = write(osfd,buf,tmp_amount)) == -1) {
#else
    while ((rv = send(osfd,buf,amount,flags)) == -1) {
#endif
        err = _MD_ERRNO();
        if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
            /* Nonblocking NSPR fd: surface would-block to the caller. */
            if (fd->secret->nonblocking) {
                break;
            }
            if (!_PR_IS_NATIVE_THREAD(me)) {
                if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0)
                    goto done;
            } else {
                if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))< 0)
                    goto done;
            }
        } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
            continue;
        } else {
#if defined(SOLARIS)
            /*
             * The write system call has been reported to return the ERANGE
             * error on occasion. Try to write in smaller chunks to workaround
             * this bug.
             */
            if (err == ERANGE) {
                if (tmp_amount > 1) {
                    tmp_amount = tmp_amount/2;  /* half the bytes */
                    continue;
                }
            }
#endif
            break;
        }
    }
    /*
     * optimization; if bytes sent is less than "amount" call
     * select before returning. This is because it is likely that
     * the next send() call will return EWOULDBLOCK.
     */
    if ((!fd->secret->nonblocking) && (rv > 0) && (rv < amount)
        && (timeout != PR_INTERVAL_NO_WAIT)) {
        if (_PR_IS_NATIVE_THREAD(me)) {
            if (socket_io_wait(osfd, WRITE_FD, timeout)< 0) {
                rv = -1;
                goto done;
            }
        } else {
            if (local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout) < 0) {
                rv = -1;
                goto done;
            }
        }
    }
    if (rv < 0) {
        _PR_MD_MAP_SEND_ERROR(err);
    }
done:
    return(rv);
}
921 | |
/*
 * sendto() with NSPR blocking semantics: send 'amount' bytes to
 * 'addr', waiting up to 'timeout' for writability on a blocking
 * descriptor.  On platforms with a sa_len field the address is
 * copied so sa_len/sa_family can be filled in without mutating the
 * caller's const addr.  Returns the byte count, or -1 with the
 * NSPR error set.
 */
PRInt32 _MD_sendto(
    PRFileDesc *fd, const void *buf, PRInt32 amount, PRIntn flags,
    const PRNetAddr *addr, PRUint32 addrlen, PRIntervalTime timeout)
{
    PRInt32 osfd = fd->secret->md.osfd;
    PRInt32 rv, err;
    PRThread *me = _PR_MD_CURRENT_THREAD();
#ifdef _PR_HAVE_SOCKADDR_LEN
    PRNetAddr addrCopy;

    addrCopy = *addr;
    ((struct sockaddr *) &addrCopy)->sa_len = addrlen;
    ((struct sockaddr *) &addrCopy)->sa_family = addr->raw.family;

    while ((rv = sendto(osfd, buf, amount, flags,
        (struct sockaddr *) &addrCopy, addrlen)) == -1) {
#else
    while ((rv = sendto(osfd, buf, amount, flags,
        (struct sockaddr *) addr, addrlen)) == -1) {
#endif
        err = _MD_ERRNO();
        if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
            /* Nonblocking NSPR fd: surface would-block to the caller. */
            if (fd->secret->nonblocking) {
                break;
            }
            if (!_PR_IS_NATIVE_THREAD(me)) {
                if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0)
                    goto done;
            } else {
                if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))< 0)
                    goto done;
            }
        } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
            continue;
        } else {
            break;
        }
    }
    if (rv < 0) {
        _PR_MD_MAP_SENDTO_ERROR(err);
    }
done:
    return(rv);
}
966 | |
/*
 * Gather-write the iov array with NSPR blocking semantics.  The
 * PRIOVec array is cast directly to struct iovec (the two layouts
 * are asserted bit-compatible elsewhere in this file's comments).
 * After a short write on a blocking fd, waits for writability
 * before returning — same optimization as _MD_send().  Returns the
 * total byte count, or -1 with the NSPR error set.
 */
PRInt32 _MD_writev(
    PRFileDesc *fd, const PRIOVec *iov,
    PRInt32 iov_size, PRIntervalTime timeout)
{
    PRInt32 rv, err;
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRInt32 index, amount = 0;
    PRInt32 osfd = fd->secret->md.osfd;

    /*
     * Calculate the total number of bytes to be sent; needed for
     * optimization later.
     * We could avoid this if this number was passed in; but it is
     * probably not a big deal because iov_size is usually small (less than
     * 3)
     */
    if (!fd->secret->nonblocking) {
        for (index=0; index<iov_size; index++) {
            amount += iov[index].iov_len;
        }
    }

    while ((rv = writev(osfd, (const struct iovec*)iov, iov_size)) == -1) {
        err = _MD_ERRNO();
        if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
            /* Nonblocking NSPR fd: surface would-block to the caller. */
            if (fd->secret->nonblocking) {
                break;
            }
            if (!_PR_IS_NATIVE_THREAD(me)) {
                if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0)
                    goto done;
            } else {
                if ((rv = socket_io_wait(osfd, WRITE_FD, timeout))<0)
                    goto done;
            }
        } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
            continue;
        } else {
            break;
        }
    }
    /*
     * optimization; if bytes sent is less than "amount" call
     * select before returning. This is because it is likely that
     * the next writev() call will return EWOULDBLOCK.
     */
    if ((!fd->secret->nonblocking) && (rv > 0) && (rv < amount)
        && (timeout != PR_INTERVAL_NO_WAIT)) {
        if (_PR_IS_NATIVE_THREAD(me)) {
            if (socket_io_wait(osfd, WRITE_FD, timeout) < 0) {
                rv = -1;
                goto done;
            }
        } else {
            if (local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout) < 0) {
                rv = -1;
                goto done;
            }
        }
    }
    if (rv < 0) {
        _PR_MD_MAP_WRITEV_ERROR(err);
    }
done:
    return(rv);
}
1033 | |
/*
 * Accept a connection with NSPR blocking semantics.  Note that
 * ECONNABORTED is treated like EAGAIN: the aborted connection is
 * dropped and the accept is retried after waiting for readability.
 * Returns the new OS fd, or -1 with the NSPR error set.
 */
PRInt32 _MD_accept(PRFileDesc *fd, PRNetAddr *addr,
    PRUint32 *addrlen, PRIntervalTime timeout)
{
    PRInt32 osfd = fd->secret->md.osfd;
    PRInt32 rv, err;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    while ((rv = accept(osfd, (struct sockaddr *) addr,
        (_PRSockLen_t *)addrlen)) == -1) {
        err = _MD_ERRNO();
        if ((err == EAGAIN) || (err == EWOULDBLOCK) || (err == ECONNABORTED)) {
            /* Nonblocking NSPR fd: surface would-block to the caller. */
            if (fd->secret->nonblocking) {
                break;
            }
            if (!_PR_IS_NATIVE_THREAD(me)) {
                if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ, timeout)) < 0)
                    goto done;
            } else {
                if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0)
                    goto done;
            }
        } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))){
            continue;
        } else {
            break;
        }
    }
    if (rv < 0) {
        _PR_MD_MAP_ACCEPT_ERROR(err);
    }
done:
#ifdef _PR_HAVE_SOCKADDR_LEN
    if (rv != -1) {
        /* ignore the sa_len field of struct sockaddr */
        if (addr) {
            addr->raw.family = ((struct sockaddr *) addr)->sa_family;
        }
    }
#endif /* _PR_HAVE_SOCKADDR_LEN */
    return(rv);
}
1075 | |
extern int _connect (int s, const struct sockaddr *name, int namelen);
/*
** Connect the socket underlying |fd| to |addr|, honoring |timeout|.
** Returns 0 on success, -1 on failure with the NSPR error set via
** _PR_MD_MAP_CONNECT_ERROR() (or PR_PENDING_INTERRUPT_ERROR if the
** calling thread was interrupted).
*/
PRInt32 _MD_connect(
    PRFileDesc *fd, const PRNetAddr *addr, PRUint32 addrlen, PRIntervalTime timeout)
{
    PRInt32 rv, err;
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRInt32 osfd = fd->secret->md.osfd;
#ifdef IRIX
    extern PRInt32 _MD_irix_connect(
        PRInt32 osfd, const PRNetAddr *addr, PRInt32 addrlen, PRIntervalTime timeout);
#endif
#ifdef _PR_HAVE_SOCKADDR_LEN
    PRNetAddr addrCopy;

    /*
     * Platforms with an sa_len field need it filled in; PRNetAddr does
     * not carry it, so work on a local copy of the address.
     */
    addrCopy = *addr;
    ((struct sockaddr *) &addrCopy)->sa_len = addrlen;
    ((struct sockaddr *) &addrCopy)->sa_family = addr->raw.family;
#endif

    /*
     * We initiate the connection setup by making a nonblocking connect()
     * call.  If the connect() call fails, there are two cases we handle
     * specially:
     * 1. The connect() call was interrupted by a signal.  In this case
     *    we simply retry connect().
     * 2. The socket is blocking at the NSPR level (the code below only
     *    takes this path when !fd->secret->nonblocking) but the OS-level
     *    connect() fails with EINPROGRESS, presumably because the
     *    underlying descriptor is nonblocking -- TODO confirm.  We first
     *    wait until the socket becomes writable.  Then we try to find
     *    out whether the connection setup succeeded or failed.
     */

retry:
#ifdef IRIX
    if ((rv = _MD_irix_connect(osfd, addr, addrlen, timeout)) == -1) {
#else
#ifdef _PR_HAVE_SOCKADDR_LEN
    if ((rv = connect(osfd, (struct sockaddr *)&addrCopy, addrlen)) == -1) {
#else
    if ((rv = connect(osfd, (struct sockaddr *)addr, addrlen)) == -1) {
#endif
#endif
        err = _MD_ERRNO();

        if (err == EINTR) {
            if (_PR_PENDING_INTERRUPT(me)) {
                me->flags &= ~_PR_INTERRUPT;
                PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
                return -1;
            }
            goto retry;
        }

        if (!fd->secret->nonblocking && (err == EINPROGRESS)) {
            if (!_PR_IS_NATIVE_THREAD(me)) {

                if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0)
                    return -1;
            } else {
                /*
                 * socket_io_wait() may return -1 or 1.
                 */

                rv = socket_io_wait(osfd, WRITE_FD, timeout);
                if (rv == -1) {
                    return -1;
                }
            }

            PR_ASSERT(rv == 1);
            if (_PR_PENDING_INTERRUPT(me)) {
                me->flags &= ~_PR_INTERRUPT;
                PR_SetError( PR_PENDING_INTERRUPT_ERROR, 0);
                return -1;
            }
            /* the socket is writable: ask the kernel how the setup ended */
            err = _MD_unix_get_nonblocking_connect_error(osfd);
            if (err != 0) {
                _PR_MD_MAP_CONNECT_ERROR(err);
                return -1;
            }
            return 0;
        }

        _PR_MD_MAP_CONNECT_ERROR(err);
    }

    return rv;
}  /* _MD_connect */
1163 | |
/*
** Bind the socket underlying |fd| to the local address |addr|.
** Returns 0 on success, -1 on failure with the NSPR error set via
** _PR_MD_MAP_BIND_ERROR().
*/
PRInt32 _MD_bind(PRFileDesc *fd, const PRNetAddr *addr, PRUint32 addrlen)
{
    PRInt32 rv, err;
#ifdef _PR_HAVE_SOCKADDR_LEN
    PRNetAddr addrCopy;

    /*
     * Platforms with an sa_len field need it filled in; PRNetAddr does
     * not carry it, so bind a local copy of the address.
     */
    addrCopy = *addr;
    ((struct sockaddr *) &addrCopy)->sa_len = addrlen;
    ((struct sockaddr *) &addrCopy)->sa_family = addr->raw.family;
    rv = bind(fd->secret->md.osfd, (struct sockaddr *) &addrCopy, (int )addrlen);
#else
    rv = bind(fd->secret->md.osfd, (struct sockaddr *) addr, (int )addrlen);
#endif
    if (rv < 0) {
        err = _MD_ERRNO();
        _PR_MD_MAP_BIND_ERROR(err);
    }
    return(rv);
}
1183 | |
1184 PRInt32 _MD_listen(PRFileDesc *fd, PRIntn backlog) | |
1185 { | |
1186 PRInt32 rv, err; | |
1187 | |
1188 rv = listen(fd->secret->md.osfd, backlog); | |
1189 if (rv < 0) { | |
1190 err = _MD_ERRNO(); | |
1191 _PR_MD_MAP_LISTEN_ERROR(err); | |
1192 } | |
1193 return(rv); | |
1194 } | |
1195 | |
1196 PRInt32 _MD_shutdown(PRFileDesc *fd, PRIntn how) | |
1197 { | |
1198 PRInt32 rv, err; | |
1199 | |
1200 rv = shutdown(fd->secret->md.osfd, how); | |
1201 if (rv < 0) { | |
1202 err = _MD_ERRNO(); | |
1203 _PR_MD_MAP_SHUTDOWN_ERROR(err); | |
1204 } | |
1205 return(rv); | |
1206 } | |
1207 | |
1208 PRInt32 _MD_socketpair(int af, int type, int flags, | |
1209 PRInt32 *osfd) | |
1210 { | |
1211 PRInt32 rv, err; | |
1212 | |
1213 rv = socketpair(af, type, flags, osfd); | |
1214 if (rv < 0) { | |
1215 err = _MD_ERRNO(); | |
1216 _PR_MD_MAP_SOCKETPAIR_ERROR(err); | |
1217 } | |
1218 return rv; | |
1219 } | |
1220 | |
/*
** Fetch the locally bound address of |fd|'s socket into |addr|.
** Returns PR_SUCCESS or PR_FAILURE; on failure the NSPR error is set
** via _PR_MD_MAP_GETSOCKNAME_ERROR().
*/
PRStatus _MD_getsockname(PRFileDesc *fd, PRNetAddr *addr,
    PRUint32 *addrlen)
{
    PRInt32 rv, err;

    rv = getsockname(fd->secret->md.osfd,
        (struct sockaddr *) addr, (_PRSockLen_t *)addrlen);
#ifdef _PR_HAVE_SOCKADDR_LEN
    if (rv == 0) {
        /* ignore the sa_len field of struct sockaddr */
        if (addr) {
            addr->raw.family = ((struct sockaddr *) addr)->sa_family;
        }
    }
#endif /* _PR_HAVE_SOCKADDR_LEN */
    if (rv < 0) {
        err = _MD_ERRNO();
        _PR_MD_MAP_GETSOCKNAME_ERROR(err);
    }
    return rv==0?PR_SUCCESS:PR_FAILURE;
}
1242 | |
/*
** Fetch the remote peer address of |fd|'s connected socket into |addr|.
** Returns PR_SUCCESS or PR_FAILURE; on failure the NSPR error is set
** via _PR_MD_MAP_GETPEERNAME_ERROR().
*/
PRStatus _MD_getpeername(PRFileDesc *fd, PRNetAddr *addr,
    PRUint32 *addrlen)
{
    PRInt32 rv, err;

    rv = getpeername(fd->secret->md.osfd,
        (struct sockaddr *) addr, (_PRSockLen_t *)addrlen);
#ifdef _PR_HAVE_SOCKADDR_LEN
    if (rv == 0) {
        /* ignore the sa_len field of struct sockaddr */
        if (addr) {
            addr->raw.family = ((struct sockaddr *) addr)->sa_family;
        }
    }
#endif /* _PR_HAVE_SOCKADDR_LEN */
    if (rv < 0) {
        err = _MD_ERRNO();
        _PR_MD_MAP_GETPEERNAME_ERROR(err);
    }
    return rv==0?PR_SUCCESS:PR_FAILURE;
}
1264 | |
1265 PRStatus _MD_getsockopt(PRFileDesc *fd, PRInt32 level, | |
1266 PRInt32 optname, char* optval, PRInt32* optlen) | |
1267 { | |
1268 PRInt32 rv, err; | |
1269 | |
1270 rv = getsockopt(fd->secret->md.osfd, level, optname, optval, (_PRSockLen_t *)optlen); | |
1271 if (rv < 0) { | |
1272 err = _MD_ERRNO(); | |
1273 _PR_MD_MAP_GETSOCKOPT_ERROR(err); | |
1274 } | |
1275 return rv==0?PR_SUCCESS:PR_FAILURE; | |
1276 } | |
1277 | |
1278 PRStatus _MD_setsockopt(PRFileDesc *fd, PRInt32 level, | |
1279 PRInt32 optname, const char* optval, PRInt32 optlen) | |
1280 { | |
1281 PRInt32 rv, err; | |
1282 | |
1283 rv = setsockopt(fd->secret->md.osfd, level, optname, optval, optlen); | |
1284 if (rv < 0) { | |
1285 err = _MD_ERRNO(); | |
1286 _PR_MD_MAP_SETSOCKOPT_ERROR(err); | |
1287 } | |
1288 return rv==0?PR_SUCCESS:PR_FAILURE; | |
1289 } | |
1290 | |
1291 PRStatus _MD_set_fd_inheritable(PRFileDesc *fd, PRBool inheritable) | |
1292 { | |
1293 int rv; | |
1294 | |
1295 rv = fcntl(fd->secret->md.osfd, F_SETFD, inheritable ? 0 : FD_CLOEXEC); | |
1296 if (-1 == rv) { | |
1297 PR_SetError(PR_UNKNOWN_ERROR, _MD_ERRNO()); | |
1298 return PR_FAILURE; | |
1299 } | |
1300 return PR_SUCCESS; | |
1301 } | |
1302 | |
/*
** Initialize fd->secret->inheritable for a new file descriptor.  For an
** fd imported from outside NSPR the close-on-exec state is unknown; an
** fd created by NSPR has no fd flags set and is therefore inheritable.
*/
void _MD_init_fd_inheritable(PRFileDesc *fd, PRBool imported)
{
    if (imported) {
        fd->secret->inheritable = _PR_TRI_UNKNOWN;
    } else {
        /* By default, a Unix fd is not closed on exec. */
#ifdef DEBUG
        {
            /* sanity check: no fd flags (notably FD_CLOEXEC) are set */
            int flags = fcntl(fd->secret->md.osfd, F_GETFD, 0);
            PR_ASSERT(0 == flags);
        }
#endif
        fd->secret->inheritable = _PR_TRI_TRUE;
    }
}
1318 | |
1319 /************************************************************************/ | |
1320 #if !defined(_PR_USE_POLL) | |
1321 | |
/*
** Scan through io queue and find any bad fd's that triggered the error
** from _MD_SELECT.
**
** A descriptor is "bad" when fcntl(F_GETFL) fails on it (typically
** EBADF after select() returned an error).  Each poll queue containing
** a bad fd is removed from the ioq, its descriptors are dropped from
** the per-cpu fd_sets, and its waiting thread is made runnable (or
** parked on the suspend queue if it was suspending).  Queues left on
** the ioq contribute to the recomputed per-cpu timeout and max fd.
*/
static void FindBadFDs(void)
{
    PRCList *q;
    PRThread *me = _MD_CURRENT_THREAD();

    PR_ASSERT(!_PR_IS_NATIVE_THREAD(me));
    q = (_PR_IOQ(me->cpu)).next;
    _PR_IOQ_MAX_OSFD(me->cpu) = -1;
    _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT;
    while (q != &_PR_IOQ(me->cpu)) {
        PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
        PRBool notify = PR_FALSE;
        _PRUnixPollDesc *pds = pq->pds;
        _PRUnixPollDesc *epds = pds + pq->npds;
        PRInt32 pq_max_osfd = -1;

        q = q->next;
        for (; pds < epds; pds++) {
            PRInt32 osfd = pds->osfd;
            pds->out_flags = 0;
            PR_ASSERT(osfd >= 0 || pds->in_flags == 0);
            if (pds->in_flags == 0) {
                continue;  /* skip this fd */
            }
            if (fcntl(osfd, F_GETFL, 0) == -1) {
                /* Found a bad descriptor, remove it from the fd_sets. */
                PR_LOG(_pr_io_lm, PR_LOG_MAX,
                    ("file descriptor %d is bad", osfd));
                pds->out_flags = _PR_UNIX_POLL_NVAL;
                notify = PR_TRUE;
            }
            if (osfd > pq_max_osfd) {
                pq_max_osfd = osfd;
            }
        }

        if (notify) {
            PRIntn pri;
            PR_REMOVE_LINK(&pq->links);
            pq->on_ioq = PR_FALSE;

            /*
             * Decrement the count of descriptors for each desciptor/event
             * because this I/O request is being removed from the
             * ioq
             */
            pds = pq->pds;
            for (; pds < epds; pds++) {
                PRInt32 osfd = pds->osfd;
                PRInt16 in_flags = pds->in_flags;
                PR_ASSERT(osfd >= 0 || in_flags == 0);
                if (in_flags & _PR_UNIX_POLL_READ) {
                    if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0)
                        FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu));
                }
                if (in_flags & _PR_UNIX_POLL_WRITE) {
                    if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0)
                        FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu));
                }
                if (in_flags & _PR_UNIX_POLL_EXCEPT) {
                    if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0)
                        FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
                }
            }

            /* wake the thread that was waiting on this poll queue */
            _PR_THREAD_LOCK(pq->thr);
            if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
                _PRCPU *cpu = pq->thr->cpu;
                _PR_SLEEPQ_LOCK(pq->thr->cpu);
                _PR_DEL_SLEEPQ(pq->thr, PR_TRUE);
                _PR_SLEEPQ_UNLOCK(pq->thr->cpu);

                if (pq->thr->flags & _PR_SUSPENDING) {
                    /*
                     * set thread state to SUSPENDED;
                     * a Resume operation on the thread
                     * will move it to the runQ
                     */
                    pq->thr->state = _PR_SUSPENDED;
                    _PR_MISCQ_LOCK(pq->thr->cpu);
                    _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu);
                    _PR_MISCQ_UNLOCK(pq->thr->cpu);
                } else {
                    pri = pq->thr->priority;
                    pq->thr->state = _PR_RUNNABLE;

                    _PR_RUNQ_LOCK(cpu);
                    _PR_ADD_RUNQ(pq->thr, cpu, pri);
                    _PR_RUNQ_UNLOCK(cpu);
                }
            }
            _PR_THREAD_UNLOCK(pq->thr);
        } else {
            /* queue stays on the ioq: fold it into the cpu's bookkeeping */
            if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu))
                _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout;
            if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd)
                _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd;
        }
    }
    if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
        if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0])
            _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
    }
}
1430 #endif /* !defined(_PR_USE_POLL) */ | |
1431 | |
1432 /************************************************************************/ | |
1433 | |
1434 /* | |
1435 ** Called by the scheduler when there is nothing to do. This means that | |
1436 ** all threads are blocked on some monitor somewhere. | |
1437 ** | |
1438 ** Note: this code doesn't release the scheduler lock. | |
1439 */ | |
1440 /* | |
1441 ** Pause the current CPU. longjmp to the cpu's pause stack | |
1442 ** | |
1443 ** This must be called with the scheduler locked | |
1444 */ | |
/*
** Pause this (virtual) CPU for at most |ticks| by waiting in
** select()/poll() on all descriptors queued on the cpu's io queue and,
** when native threads are supported, on the wakeup pipe.  On return,
** readiness results are copied back into each queue's out_flags and the
** waiting threads are made runnable.  Called with the scheduler locked;
** see the comments above this function.
*/
void _MD_PauseCPU(PRIntervalTime ticks)
{
    PRThread *me = _MD_CURRENT_THREAD();
#ifdef _PR_USE_POLL
    int timeout;
    struct pollfd *pollfds;     /* an array of pollfd structures */
    struct pollfd *pollfdPtr;   /* a pointer that steps through the array */
    unsigned long npollfds;     /* number of pollfd structures in array */
    unsigned long pollfds_size;
    int nfd;                    /* to hold the return value of poll() */
#else
    struct timeval timeout, *tvp;
    fd_set r, w, e;
    fd_set *rp, *wp, *ep;
    PRInt32 max_osfd, nfd;
#endif  /* _PR_USE_POLL */
    PRInt32 rv;
    PRCList *q;
    PRUint32 min_timeout;
    sigset_t oldset;
#ifdef IRIX
    extern sigset_t ints_off;
#endif

    PR_ASSERT(_PR_MD_GET_INTSOFF() != 0);

    _PR_MD_IOQ_LOCK();

#ifdef _PR_USE_POLL
    /* Build up the pollfd structure array to wait on */

    /* Find out how many pollfd structures are needed */
    npollfds = _PR_IOQ_OSFD_CNT(me->cpu);
    PR_ASSERT(npollfds >= 0);

    /*
     * We use a pipe to wake up a native thread.  An fd is needed
     * for the pipe and we poll it for reading.
     */
    if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
        npollfds++;
#ifdef IRIX
        /*
         * On Irix, a second pipe is used to cause the primordial cpu to
         * wakeup and exit, when the process is exiting because of a call
         * to exit/PR_ProcessExit.
         */
        if (me->cpu->id == 0) {
            npollfds++;
        }
#endif
    }

    /*
     * if the cpu's pollfd array is not big enough, release it and allocate a new one
     */
    if (npollfds > _PR_IOQ_POLLFDS_SIZE(me->cpu)) {
        if (_PR_IOQ_POLLFDS(me->cpu) != NULL)
            PR_DELETE(_PR_IOQ_POLLFDS(me->cpu));
        pollfds_size = PR_MAX(_PR_IOQ_MIN_POLLFDS_SIZE(me->cpu), npollfds);
        pollfds = (struct pollfd *) PR_MALLOC(pollfds_size * sizeof(struct pollfd));
        _PR_IOQ_POLLFDS(me->cpu) = pollfds;
        _PR_IOQ_POLLFDS_SIZE(me->cpu) = pollfds_size;
    } else {
        pollfds = _PR_IOQ_POLLFDS(me->cpu);
    }
    pollfdPtr = pollfds;

    /*
     * If we need to poll the pipe for waking up a native thread,
     * the pipe's fd is the first element in the pollfds array.
     */
    if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
        pollfdPtr->fd = _pr_md_pipefd[0];
        pollfdPtr->events = POLLIN;
        pollfdPtr++;
#ifdef IRIX
        /*
         * On Irix, the second element is the exit pipe
         */
        if (me->cpu->id == 0) {
            pollfdPtr->fd = _pr_irix_primoridal_cpu_fd[0];
            pollfdPtr->events = POLLIN;
            pollfdPtr++;
        }
#endif
    }

    min_timeout = PR_INTERVAL_NO_TIMEOUT;
    for (q = _PR_IOQ(me->cpu).next; q != &_PR_IOQ(me->cpu); q = q->next) {
        PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
        _PRUnixPollDesc *pds = pq->pds;
        _PRUnixPollDesc *epds = pds + pq->npds;

        if (pq->timeout < min_timeout) {
            min_timeout = pq->timeout;
        }
        for (; pds < epds; pds++, pollfdPtr++) {
            /*
             * Assert that the pollfdPtr pointer does not go
             * beyond the end of the pollfds array
             */
            PR_ASSERT(pollfdPtr < pollfds + npollfds);
            pollfdPtr->fd = pds->osfd;
            /* direct copy of poll flags */
            pollfdPtr->events = pds->in_flags;
        }
    }
    _PR_IOQ_TIMEOUT(me->cpu) = min_timeout;
#else
    /*
     * assigment of fd_sets
     */
    r = _PR_FD_READ_SET(me->cpu);
    w = _PR_FD_WRITE_SET(me->cpu);
    e = _PR_FD_EXCEPTION_SET(me->cpu);

    rp = &r;
    wp = &w;
    ep = &e;

    max_osfd = _PR_IOQ_MAX_OSFD(me->cpu) + 1;
    min_timeout = _PR_IOQ_TIMEOUT(me->cpu);
#endif  /* _PR_USE_POLL */
    /*
    ** Compute the minimum timeout value: make it the smaller of the
    ** timeouts specified by the i/o pollers or the timeout of the first
    ** sleeping thread.
    */
    q = _PR_SLEEPQ(me->cpu).next;

    if (q != &_PR_SLEEPQ(me->cpu)) {
        PRThread *t = _PR_THREAD_PTR(q);

        if (t->sleep < min_timeout) {
            min_timeout = t->sleep;
        }
    }
    if (min_timeout > ticks) {
        min_timeout = ticks;
    }

#ifdef _PR_USE_POLL
    if (min_timeout == PR_INTERVAL_NO_TIMEOUT)
        timeout = -1;
    else
        timeout = PR_IntervalToMilliseconds(min_timeout);
#else
    if (min_timeout == PR_INTERVAL_NO_TIMEOUT) {
        tvp = NULL;
    } else {
        timeout.tv_sec = PR_IntervalToSeconds(min_timeout);
        timeout.tv_usec = PR_IntervalToMicroseconds(min_timeout)
            % PR_USEC_PER_SEC;
        tvp = &timeout;
    }
#endif  /* _PR_USE_POLL */

    _PR_MD_IOQ_UNLOCK();
    _MD_CHECK_FOR_EXIT();
    /*
     * check for i/o operations
     */
#ifndef _PR_NO_CLOCK_TIMER
    /*
     * Disable the clock interrupts while we are in select, if clock interrupts
     * are enabled. Otherwise, when the select/poll calls are interrupted, the
     * timer value starts ticking from zero again when the system call is restarted.
     */
#ifdef IRIX
    /*
     * SIGCHLD signal is used on Irix to detect he termination of an
     * sproc by SIGSEGV, SIGBUS or SIGABRT signals when
     * _nspr_terminate_on_error is set.
     */
    if ((!_nspr_noclock) || (_nspr_terminate_on_error))
#else
    /*
     * NOTE(review): on the non-IRIX path the unbraced `if` governs only
     * the PR_ASSERT below, so this SIG_BLOCK runs even when
     * _nspr_noclock is set, while the matching SIG_SETMASK restore
     * after the wait is conditional on !_nspr_noclock -- confirm this
     * asymmetry is intentional (harmless when no timer is armed).
     */
    if (!_nspr_noclock)
#endif  /* IRIX */
#ifdef IRIX
    sigprocmask(SIG_BLOCK, &ints_off, &oldset);
#else
    PR_ASSERT(sigismember(&timer_set, SIGALRM));
    sigprocmask(SIG_BLOCK, &timer_set, &oldset);
#endif  /* IRIX */
#endif  /* !_PR_NO_CLOCK_TIMER */

#ifndef _PR_USE_POLL
    PR_ASSERT(FD_ISSET(_pr_md_pipefd[0],rp));
    nfd = _MD_SELECT(max_osfd, rp, wp, ep, tvp);
#else
    nfd = _MD_POLL(pollfds, npollfds, timeout);
#endif  /* !_PR_USE_POLL */

#ifndef _PR_NO_CLOCK_TIMER
#ifdef IRIX
    if ((!_nspr_noclock) || (_nspr_terminate_on_error))
#else
    if (!_nspr_noclock)
#endif  /* IRIX */
        sigprocmask(SIG_SETMASK, &oldset, 0);
#endif  /* !_PR_NO_CLOCK_TIMER */

    _MD_CHECK_FOR_EXIT();

#ifdef IRIX
    _PR_MD_primordial_cpu();
#endif

    _PR_MD_IOQ_LOCK();
    /*
    ** Notify monitors that are associated with the selected descriptors.
    */
#ifdef _PR_USE_POLL
    if (nfd > 0) {
        pollfdPtr = pollfds;
        if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
            /*
             * Assert that the pipe is the first element in the
             * pollfds array.
             */
            PR_ASSERT(pollfds[0].fd == _pr_md_pipefd[0]);
            if ((pollfds[0].revents & POLLIN) && (nfd == 1)) {
                /*
                 * woken up by another thread; read all the data
                 * in the pipe to empty the pipe
                 */
                while ((rv = read(_pr_md_pipefd[0], _pr_md_pipebuf,
                    PIPE_BUF)) == PIPE_BUF){
                }
                PR_ASSERT((rv > 0) || ((rv == -1) && (errno == EAGAIN)));
            }
            pollfdPtr++;
#ifdef IRIX
            /*
             * On Irix, check to see if the primordial cpu needs to exit
             * to cause the process to terminate
             */
            if (me->cpu->id == 0) {
                PR_ASSERT(pollfds[1].fd == _pr_irix_primoridal_cpu_fd[0]);
                if (pollfdPtr->revents & POLLIN) {
                    if (_pr_irix_process_exit) {
                        /*
                         * process exit due to a call to PR_ProcessExit
                         */
                        prctl(PR_SETEXITSIG, SIGKILL);
                        _exit(_pr_irix_process_exit_code);
                    } else {
                        while ((rv = read(_pr_irix_primoridal_cpu_fd[0],
                            _pr_md_pipebuf, PIPE_BUF)) == PIPE_BUF) {
                        }
                        PR_ASSERT(rv > 0);
                    }
                }
                pollfdPtr++;
            }
#endif
        }
        for (q = _PR_IOQ(me->cpu).next; q != &_PR_IOQ(me->cpu); q = q->next) {
            PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
            PRBool notify = PR_FALSE;
            _PRUnixPollDesc *pds = pq->pds;
            _PRUnixPollDesc *epds = pds + pq->npds;

            for (; pds < epds; pds++, pollfdPtr++) {
                /*
                 * Assert that the pollfdPtr pointer does not go beyond
                 * the end of the pollfds array.
                 */
                PR_ASSERT(pollfdPtr < pollfds + npollfds);
                /*
                 * Assert that the fd's in the pollfds array (stepped
                 * through by pollfdPtr) are in the same order as
                 * the fd's in _PR_IOQ() (stepped through by q and pds).
                 * This is how the pollfds array was created earlier.
                 */
                PR_ASSERT(pollfdPtr->fd == pds->osfd);
                pds->out_flags = pollfdPtr->revents;
                /* Negative fd's are ignored by poll() */
                if (pds->osfd >= 0 && pds->out_flags) {
                    notify = PR_TRUE;
                }
            }
            if (notify) {
                PRIntn pri;
                PRThread *thred;

                PR_REMOVE_LINK(&pq->links);
                pq->on_ioq = PR_FALSE;

                thred = pq->thr;
                _PR_THREAD_LOCK(thred);
                if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
                    _PRCPU *cpu = pq->thr->cpu;
                    _PR_SLEEPQ_LOCK(pq->thr->cpu);
                    _PR_DEL_SLEEPQ(pq->thr, PR_TRUE);
                    _PR_SLEEPQ_UNLOCK(pq->thr->cpu);

                    if (pq->thr->flags & _PR_SUSPENDING) {
                        /*
                         * set thread state to SUSPENDED;
                         * a Resume operation on the thread
                         * will move it to the runQ
                         */
                        pq->thr->state = _PR_SUSPENDED;
                        _PR_MISCQ_LOCK(pq->thr->cpu);
                        _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu);
                        _PR_MISCQ_UNLOCK(pq->thr->cpu);
                    } else {
                        pri = pq->thr->priority;
                        pq->thr->state = _PR_RUNNABLE;

                        _PR_RUNQ_LOCK(cpu);
                        _PR_ADD_RUNQ(pq->thr, cpu, pri);
                        _PR_RUNQ_UNLOCK(cpu);
                        if (_pr_md_idle_cpus > 1)
                            _PR_MD_WAKEUP_WAITER(thred);
                    }
                }
                _PR_THREAD_UNLOCK(thred);
                _PR_IOQ_OSFD_CNT(me->cpu) -= pq->npds;
                PR_ASSERT(_PR_IOQ_OSFD_CNT(me->cpu) >= 0);
            }
        }
    } else if (nfd == -1) {
        PR_LOG(_pr_io_lm, PR_LOG_MAX, ("poll() failed with errno %d", errno));
    }

#else
    if (nfd > 0) {
        q = _PR_IOQ(me->cpu).next;
        _PR_IOQ_MAX_OSFD(me->cpu) = -1;
        _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT;
        while (q != &_PR_IOQ(me->cpu)) {
            PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
            PRBool notify = PR_FALSE;
            _PRUnixPollDesc *pds = pq->pds;
            _PRUnixPollDesc *epds = pds + pq->npds;
            PRInt32 pq_max_osfd = -1;

            q = q->next;
            for (; pds < epds; pds++) {
                PRInt32 osfd = pds->osfd;
                PRInt16 in_flags = pds->in_flags;
                PRInt16 out_flags = 0;
                PR_ASSERT(osfd >= 0 || in_flags == 0);
                if ((in_flags & _PR_UNIX_POLL_READ) && FD_ISSET(osfd, rp)) {
                    out_flags |= _PR_UNIX_POLL_READ;
                }
                if ((in_flags & _PR_UNIX_POLL_WRITE) && FD_ISSET(osfd, wp)) {
                    out_flags |= _PR_UNIX_POLL_WRITE;
                }
                if ((in_flags & _PR_UNIX_POLL_EXCEPT) && FD_ISSET(osfd, ep)) {
                    out_flags |= _PR_UNIX_POLL_EXCEPT;
                }
                pds->out_flags = out_flags;
                if (out_flags) {
                    notify = PR_TRUE;
                }
                if (osfd > pq_max_osfd) {
                    pq_max_osfd = osfd;
                }
            }
            if (notify == PR_TRUE) {
                PRIntn pri;
                PRThread *thred;

                PR_REMOVE_LINK(&pq->links);
                pq->on_ioq = PR_FALSE;

                /*
                 * Decrement the count of descriptors for each desciptor/event
                 * because this I/O request is being removed from the
                 * ioq
                 */
                pds = pq->pds;
                for (; pds < epds; pds++) {
                    PRInt32 osfd = pds->osfd;
                    PRInt16 in_flags = pds->in_flags;
                    PR_ASSERT(osfd >= 0 || in_flags == 0);
                    if (in_flags & _PR_UNIX_POLL_READ) {
                        if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0)
                            FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu));
                    }
                    if (in_flags & _PR_UNIX_POLL_WRITE) {
                        if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0)
                            FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu));
                    }
                    if (in_flags & _PR_UNIX_POLL_EXCEPT) {
                        if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0)
                            FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
                    }
                }

                /*
                 * Because this thread can run on a different cpu right
                 * after being added to the run queue, do not dereference
                 * pq
                 */
                thred = pq->thr;
                _PR_THREAD_LOCK(thred);
                if (pq->thr->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
                    _PRCPU *cpu = thred->cpu;
                    _PR_SLEEPQ_LOCK(pq->thr->cpu);
                    _PR_DEL_SLEEPQ(pq->thr, PR_TRUE);
                    _PR_SLEEPQ_UNLOCK(pq->thr->cpu);

                    if (pq->thr->flags & _PR_SUSPENDING) {
                        /*
                         * set thread state to SUSPENDED;
                         * a Resume operation on the thread
                         * will move it to the runQ
                         */
                        pq->thr->state = _PR_SUSPENDED;
                        _PR_MISCQ_LOCK(pq->thr->cpu);
                        _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu);
                        _PR_MISCQ_UNLOCK(pq->thr->cpu);
                    } else {
                        pri = pq->thr->priority;
                        pq->thr->state = _PR_RUNNABLE;

                        pq->thr->cpu = cpu;
                        _PR_RUNQ_LOCK(cpu);
                        _PR_ADD_RUNQ(pq->thr, cpu, pri);
                        _PR_RUNQ_UNLOCK(cpu);
                        if (_pr_md_idle_cpus > 1)
                            _PR_MD_WAKEUP_WAITER(thred);
                    }
                }
                _PR_THREAD_UNLOCK(thred);
            } else {
                if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu))
                    _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout;
                if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd)
                    _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd;
            }
        }
        if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
            if ((FD_ISSET(_pr_md_pipefd[0], rp)) && (nfd == 1)) {
                /*
                 * woken up by another thread; read all the data
                 * in the pipe to empty the pipe
                 */
                while ((rv =
                    read(_pr_md_pipefd[0], _pr_md_pipebuf, PIPE_BUF))
                    == PIPE_BUF){
                }
                PR_ASSERT((rv > 0) ||
                    ((rv == -1) && (errno == EAGAIN)));
            }
            if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0])
                _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
#ifdef IRIX
            if ((me->cpu->id == 0) &&
                (FD_ISSET(_pr_irix_primoridal_cpu_fd[0], rp))) {
                if (_pr_irix_process_exit) {
                    /*
                     * process exit due to a call to PR_ProcessExit
                     */
                    prctl(PR_SETEXITSIG, SIGKILL);
                    _exit(_pr_irix_process_exit_code);
                } else {
                    while ((rv = read(_pr_irix_primoridal_cpu_fd[0],
                        _pr_md_pipebuf, PIPE_BUF)) == PIPE_BUF) {
                    }
                    PR_ASSERT(rv > 0);
                }
            }
            if (me->cpu->id == 0) {
                if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_irix_primoridal_cpu_fd[0])
                    _PR_IOQ_MAX_OSFD(me->cpu) = _pr_irix_primoridal_cpu_fd[0];
            }
#endif
        }
    } else if (nfd < 0) {
        if (errno == EBADF) {
            /* a stale fd poisoned select(); purge it from the ioq */
            FindBadFDs();
        } else {
            PR_LOG(_pr_io_lm, PR_LOG_MAX, ("select() failed with errno %d",
                errno));
        }
    } else {
        PR_ASSERT(nfd == 0);
        /*
         * compute the new value of _PR_IOQ_TIMEOUT
         */
        q = _PR_IOQ(me->cpu).next;
        _PR_IOQ_MAX_OSFD(me->cpu) = -1;
        _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT;
        while (q != &_PR_IOQ(me->cpu)) {
            PRPollQueue *pq = _PR_POLLQUEUE_PTR(q);
            _PRUnixPollDesc *pds = pq->pds;
            _PRUnixPollDesc *epds = pds + pq->npds;
            PRInt32 pq_max_osfd = -1;

            q = q->next;
            for (; pds < epds; pds++) {
                if (pds->osfd > pq_max_osfd) {
                    pq_max_osfd = pds->osfd;
                }
            }
            if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu))
                _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout;
            if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd)
                _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd;
        }
        if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
            if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0])
                _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
        }
    }
#endif  /* _PR_USE_POLL */
    _PR_MD_IOQ_UNLOCK();
}
1959 | |
1960 void _MD_Wakeup_CPUs() | |
1961 { | |
1962 PRInt32 rv, data; | |
1963 | |
1964 data = 0; | |
1965 rv = write(_pr_md_pipefd[1], &data, 1); | |
1966 | |
1967 while ((rv < 0) && (errno == EAGAIN)) { | |
1968 /* | |
1969 * pipe full, read all data in pipe to empty it | |
1970 */ | |
1971 while ((rv = | |
1972 read(_pr_md_pipefd[0], _pr_md_pipebuf, PIPE_BUF)) | |
1973 == PIPE_BUF) { | |
1974 } | |
1975 PR_ASSERT((rv > 0) || | |
1976 ((rv == -1) && (errno == EAGAIN))); | |
1977 rv = write(_pr_md_pipefd[1], &data, 1); | |
1978 } | |
1979 } | |
1980 | |
1981 | |
/*
** Create and configure the wakeup pipe used by _MD_Wakeup_CPUs().
** Both ends are made nonblocking so a writer never stalls when the
** pipe is full and the drain loops never block when it is empty.
*/
void _MD_InitCPUS()
{
    PRInt32 rv, flags;
    PRThread *me = _MD_CURRENT_THREAD();

    rv = pipe(_pr_md_pipefd);
    PR_ASSERT(rv == 0);
    _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
#ifndef _PR_USE_POLL
    /* the select() path watches the pipe's read end via the cpu fd_set */
    FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(me->cpu));
#endif

    flags = fcntl(_pr_md_pipefd[0], F_GETFL, 0);
    fcntl(_pr_md_pipefd[0], F_SETFL, flags | O_NONBLOCK);
    flags = fcntl(_pr_md_pipefd[1], F_GETFL, 0);
    fcntl(_pr_md_pipefd[1], F_SETFL, flags | O_NONBLOCK);
}
1999 | |
/*
** Unix SIGALRM (clock) signal handler.
**
** Runs the NSPR clock interrupt and, when the interrupt requested a
** reschedule or a same-priority thread is runnable, context-switches
** away from the current thread.  If interrupts are soft-disabled the
** tick is recorded in the cpu's missed[] state and handled later.
*/
static void ClockInterruptHandler()
{
    int olderrno;
    PRUintn pri;
    _PRCPU *cpu = _PR_MD_CURRENT_CPU();
    PRThread *me = _MD_CURRENT_THREAD();

#ifdef SOLARIS
    /* on Solaris, ticks landing on a native thread are deferred to the
     * primordial cpu's missed-interrupt state */
    if (!me || _PR_IS_NATIVE_THREAD(me)) {
        _pr_primordialCPU->u.missed[_pr_primordialCPU->where] |= _PR_MISSED_CLOCK;
        return;
    }
#endif

    if (_PR_MD_GET_INTSOFF() != 0) {
        cpu->u.missed[cpu->where] |= _PR_MISSED_CLOCK;
        return;
    }
    _PR_MD_SET_INTSOFF(1);

    /* preserve errno across the interrupt processing */
    olderrno = errno;
    _PR_ClockInterrupt();
    errno = olderrno;

    /*
    ** If the interrupt wants a resched or if some other thread at
    ** the same priority needs the cpu, reschedule.
    */
    pri = me->priority;
    if ((cpu->u.missed[3] || (_PR_RUNQREADYMASK(me->cpu) >> pri))) {
#ifdef _PR_NO_PREEMPT
        cpu->resched = PR_TRUE;
        if (pr_interruptSwitchHook) {
            (*pr_interruptSwitchHook)(pr_interruptSwitchHookArg);
        }
#else /* _PR_NO_PREEMPT */
        /*
        ** Re-enable unix interrupts (so that we can use
        ** setjmp/longjmp for context switching without having to
        ** worry about the signal state)
        */
        sigprocmask(SIG_SETMASK, &empty_set, 0);
        PR_LOG(_pr_sched_lm, PR_LOG_MIN, ("clock caused context switch"));

        if(!(me->flags & _PR_IDLE_THREAD)) {
            _PR_THREAD_LOCK(me);
            me->state = _PR_RUNNABLE;
            me->cpu = cpu;
            _PR_RUNQ_LOCK(cpu);
            _PR_ADD_RUNQ(me, cpu, pri);
            _PR_RUNQ_UNLOCK(cpu);
            _PR_THREAD_UNLOCK(me);
        } else
            me->state = _PR_RUNNABLE;
        _MD_SWITCH_CONTEXT(me);
        PR_LOG(_pr_sched_lm, PR_LOG_MIN, ("clock back from context switch"));
#endif /* _PR_NO_PREEMPT */
    }
    /*
     * Because this thread could be running on a different cpu after
     * a context switch the current cpu should be accessed and the
     * value of the 'cpu' variable should not be used.
     */
    _PR_MD_SET_INTSOFF(0);
}
2068 | |
/*
 * On HP-UX 9, we have to use the sigvector() interface to restart
 * interrupted system calls, because sigaction() does not have the
 * SA_RESTART flag.
 */

#ifdef HPUX9
/*
** SIGALRM handler installed via sigvector() on HP-UX 9: forwards to
** the generic handler, then asks the kernel to restart the system
** call that the signal interrupted.
*/
static void HPUX9_ClockInterruptHandler(
    int sig,
    int code,
    struct sigcontext *scp)
{
    ClockInterruptHandler();
    scp->sc_syscall_action = SIG_RESTART;
}
#endif /* HPUX9 */
2085 | |
2086 /* # of milliseconds per clock tick that we will use */ | |
2087 #define MSEC_PER_TICK 50 | |
2088 | |
2089 | |
2090 void _MD_StartInterrupts() | |
2091 { | |
2092 char *eval; | |
2093 | |
2094 if ((eval = getenv("NSPR_NOCLOCK")) != NULL) { | |
2095 if (atoi(eval) == 0) | |
2096 _nspr_noclock = 0; | |
2097 else | |
2098 _nspr_noclock = 1; | |
2099 } | |
2100 | |
2101 #ifndef _PR_NO_CLOCK_TIMER | |
2102 if (!_nspr_noclock) { | |
2103 _MD_EnableClockInterrupts(); | |
2104 } | |
2105 #endif | |
2106 } | |
2107 | |
/*
** Stop the virtual clock by blocking SIGALRM delivery (timer_set
** contains SIGALRM; see _PR_UnixInit).
*/
void _MD_StopInterrupts()
{
    sigprocmask(SIG_BLOCK, &timer_set, 0);
}
2112 | |
/*
** Install the SIGALRM handler and arm a recurring real-time interval
** timer that fires every MSEC_PER_TICK milliseconds.  Only valid in
** the single-CPU user-level threading configuration (asserted below).
*/
void _MD_EnableClockInterrupts()
{
    struct itimerval itval;
    extern PRUintn _pr_numCPU;
#ifdef HPUX9
    struct sigvec vec;

    /* sigvector() is used on HP-UX 9 because its sigaction() lacks
     * SA_RESTART (see HPUX9_ClockInterruptHandler). */
    vec.sv_handler = (void (*)()) HPUX9_ClockInterruptHandler;
    vec.sv_mask = 0;
    vec.sv_flags = 0;
    sigvector(SIGALRM, &vec, 0);
#else
    struct sigaction vtact;

    vtact.sa_handler = (void (*)()) ClockInterruptHandler;
    sigemptyset(&vtact.sa_mask);
    vtact.sa_flags = SA_RESTART; /* restart syscalls interrupted by the tick */
    sigaction(SIGALRM, &vtact, 0);
#endif /* HPUX9 */

    PR_ASSERT(_pr_numCPU == 1);
    itval.it_interval.tv_sec = 0;
    itval.it_interval.tv_usec = MSEC_PER_TICK * PR_USEC_PER_MSEC;
    itval.it_value = itval.it_interval;
    setitimer(ITIMER_REAL, &itval, 0);
}
2139 | |
2140 void _MD_DisableClockInterrupts() | |
2141 { | |
2142 struct itimerval itval; | |
2143 extern PRUintn _pr_numCPU; | |
2144 | |
2145 PR_ASSERT(_pr_numCPU == 1); | |
2146 itval.it_interval.tv_sec = 0; | |
2147 itval.it_interval.tv_usec = 0; | |
2148 itval.it_value = itval.it_interval; | |
2149 setitimer(ITIMER_REAL, &itval, 0); | |
2150 } | |
2151 | |
/*
** Temporarily hold off SIGALRM delivery (paired with
** _MD_UnblockClockInterrupts).
*/
void _MD_BlockClockInterrupts()
{
    sigprocmask(SIG_BLOCK, &timer_set, 0);
}
2156 | |
/*
** Re-allow SIGALRM delivery after _MD_BlockClockInterrupts.
*/
void _MD_UnblockClockInterrupts()
{
    sigprocmask(SIG_UNBLOCK, &timer_set, 0);
}
2161 | |
2162 void _MD_MakeNonblock(PRFileDesc *fd) | |
2163 { | |
2164 PRInt32 osfd = fd->secret->md.osfd; | |
2165 int flags; | |
2166 | |
2167 if (osfd <= 2) { | |
2168 /* Don't mess around with stdin, stdout or stderr */ | |
2169 return; | |
2170 } | |
2171 flags = fcntl(osfd, F_GETFL, 0); | |
2172 | |
2173 /* | |
2174 * Use O_NONBLOCK (POSIX-style non-blocking I/O) whenever possible. | |
2175 * On SunOS 4, we must use FNDELAY (BSD-style non-blocking I/O), | |
2176 * otherwise connect() still blocks and can be interrupted by SIGALRM. | |
2177 */ | |
2178 | |
2179 fcntl(osfd, F_SETFL, flags | O_NONBLOCK); | |
2180 } | |
2181 | |
/*
** Open a file, translating NSPR PR_* flags into native O_* flags and
** dispatching through the 64-bit-capable _md_iovector._open64.
** Returns the new OS descriptor, or -1 with the NSPR error set.
*/
PRInt32 _MD_open(const char *name, PRIntn flags, PRIntn mode)
{
    PRInt32 osflags;
    PRInt32 rv, err;

    /* Access mode: PR_RDWR wins over PR_WRONLY; default is read-only. */
    if (flags & PR_RDWR) {
        osflags = O_RDWR;
    } else if (flags & PR_WRONLY) {
        osflags = O_WRONLY;
    } else {
        osflags = O_RDONLY;
    }

    if (flags & PR_EXCL)
        osflags |= O_EXCL;
    if (flags & PR_APPEND)
        osflags |= O_APPEND;
    if (flags & PR_TRUNCATE)
        osflags |= O_TRUNC;
    if (flags & PR_SYNC) {
#if defined(O_SYNC)
        osflags |= O_SYNC;
#elif defined(O_FSYNC)
        osflags |= O_FSYNC;
#else
#error "Neither O_SYNC nor O_FSYNC is defined on this platform"
#endif
    }

    /*
    ** On creations we hold the 'create' lock in order to enforce
    ** the semantics of PR_Rename. (see the latter for more details)
    */
    if (flags & PR_CREATE_FILE)
    {
        osflags |= O_CREAT;
        if (NULL !=_pr_rename_lock)
            PR_Lock(_pr_rename_lock);
    }

#if defined(ANDROID)
    /* Android has no open64; request large-file support explicitly
     * (see the matching note in _PR_InitIOV). */
    osflags |= O_LARGEFILE;
#endif

    rv = _md_iovector._open64(name, osflags, mode);

    if (rv < 0) {
        err = _MD_ERRNO();
        _PR_MD_MAP_OPEN_ERROR(err);
    }

    if ((flags & PR_CREATE_FILE) && (NULL !=_pr_rename_lock))
        PR_Unlock(_pr_rename_lock);
    return rv;
}
2237 | |
/* Interval between interrupt checks; set from
 * _PR_INTERRUPT_CHECK_INTERVAL_SECS in _PR_UnixInit. */
PRIntervalTime intr_timeout_ticks;

#if defined(SOLARIS) || defined(IRIX)
/*
** Debugging aids, installed by _PR_UnixInit when the corresponding
** NSPR_SIG*_HANDLE environment variables are set: report the fatal
** signal on stderr, then pause() so a debugger can attach to the
** still-live process.
*/
static void sigsegvhandler() {
    fprintf(stderr,"Received SIGSEGV\n");
    fflush(stderr);
    pause();
}

static void sigaborthandler() {
    fprintf(stderr,"Received SIGABRT\n");
    fflush(stderr);
    pause();
}

static void sigbushandler() {
    fprintf(stderr,"Received SIGBUS\n");
    fflush(stderr);
    pause();
}
#endif /* SOLARIS, IRIX */
2259 | |
2260 #endif /* !defined(_PR_PTHREADS) */ | |
2261 | |
2262 void _MD_query_fd_inheritable(PRFileDesc *fd) | |
2263 { | |
2264 int flags; | |
2265 | |
2266 PR_ASSERT(_PR_TRI_UNKNOWN == fd->secret->inheritable); | |
2267 flags = fcntl(fd->secret->md.osfd, F_GETFD, 0); | |
2268 PR_ASSERT(-1 != flags); | |
2269 fd->secret->inheritable = (flags & FD_CLOEXEC) ? | |
2270 _PR_TRI_FALSE : _PR_TRI_TRUE; | |
2271 } | |
2272 | |
2273 PROffset32 _MD_lseek(PRFileDesc *fd, PROffset32 offset, PRSeekWhence whence) | |
2274 { | |
2275 PROffset32 rv, where; | |
2276 | |
2277 switch (whence) { | |
2278 case PR_SEEK_SET: | |
2279 where = SEEK_SET; | |
2280 break; | |
2281 case PR_SEEK_CUR: | |
2282 where = SEEK_CUR; | |
2283 break; | |
2284 case PR_SEEK_END: | |
2285 where = SEEK_END; | |
2286 break; | |
2287 default: | |
2288 PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0); | |
2289 rv = -1; | |
2290 goto done; | |
2291 } | |
2292 rv = lseek(fd->secret->md.osfd,offset,where); | |
2293 if (rv == -1) | |
2294 { | |
2295 PRInt32 syserr = _MD_ERRNO(); | |
2296 _PR_MD_MAP_LSEEK_ERROR(syserr); | |
2297 } | |
2298 done: | |
2299 return(rv); | |
2300 } | |
2301 | |
/*
** 64-bit seek: map the NSPR whence value onto SEEK_* and dispatch
** through _md_iovector so platforms without native 64-bit offsets can
** substitute an emulation (see _MD_Unix_lseek64).  Returns the new
** offset, or minus_one with the NSPR error set.
*/
PROffset64 _MD_lseek64(PRFileDesc *fd, PROffset64 offset, PRSeekWhence whence)
{
    PRInt32 where;
    PROffset64 rv;

    switch (whence)
    {
        case PR_SEEK_SET:
            where = SEEK_SET;
            break;
        case PR_SEEK_CUR:
            where = SEEK_CUR;
            break;
        case PR_SEEK_END:
            where = SEEK_END;
            break;
        default:
            PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
            rv = minus_one;
            goto done;
    }
    rv = _md_iovector._lseek64(fd->secret->md.osfd, offset, where);
    if (LL_EQ(rv, minus_one))
    {
        PRInt32 syserr = _MD_ERRNO();
        _PR_MD_MAP_LSEEK_ERROR(syserr);
    }
done:
    return rv;
} /* _MD_lseek64 */
2332 | |
/*
** _MD_set_fileinfo_times --
**     Set the modifyTime and creationTime of the PRFileInfo
**     structure using the values in struct stat.
**
** _MD_set_fileinfo64_times --
**     Set the modifyTime and creationTime of the PRFileInfo64
**     structure using the values in _MDStat64.
**
** Both convert the stat timestamps to PRTime (microseconds since the
** epoch) with the 64-bit LL_ macros.  Which sub-second stat fields
** exist is platform dependent, hence the #if chain below.
*/

#if defined(_PR_STAT_HAS_ST_ATIM)
/*
** struct stat has st_atim, st_mtim, and st_ctim fields of
** type timestruc_t.
*/
static void _MD_set_fileinfo_times(
    const struct stat *sb,
    PRFileInfo *info)
{
    PRInt64 us, s2us;

    LL_I2L(s2us, PR_USEC_PER_SEC);
    LL_I2L(info->modifyTime, sb->st_mtim.tv_sec);
    LL_MUL(info->modifyTime, info->modifyTime, s2us);
    LL_I2L(us, sb->st_mtim.tv_nsec / 1000); /* nanoseconds -> microseconds */
    LL_ADD(info->modifyTime, info->modifyTime, us);
    LL_I2L(info->creationTime, sb->st_ctim.tv_sec);
    LL_MUL(info->creationTime, info->creationTime, s2us);
    LL_I2L(us, sb->st_ctim.tv_nsec / 1000);
    LL_ADD(info->creationTime, info->creationTime, us);
}

static void _MD_set_fileinfo64_times(
    const _MDStat64 *sb,
    PRFileInfo64 *info)
{
    PRInt64 us, s2us;

    LL_I2L(s2us, PR_USEC_PER_SEC);
    LL_I2L(info->modifyTime, sb->st_mtim.tv_sec);
    LL_MUL(info->modifyTime, info->modifyTime, s2us);
    LL_I2L(us, sb->st_mtim.tv_nsec / 1000);
    LL_ADD(info->modifyTime, info->modifyTime, us);
    LL_I2L(info->creationTime, sb->st_ctim.tv_sec);
    LL_MUL(info->creationTime, info->creationTime, s2us);
    LL_I2L(us, sb->st_ctim.tv_nsec / 1000);
    LL_ADD(info->creationTime, info->creationTime, us);
}
#elif defined(_PR_STAT_HAS_ST_ATIM_UNION)
/*
** The st_atim, st_mtim, and st_ctim fields in struct stat are
** unions with a st__tim union member of type timestruc_t.
*/
static void _MD_set_fileinfo_times(
    const struct stat *sb,
    PRFileInfo *info)
{
    PRInt64 us, s2us;

    LL_I2L(s2us, PR_USEC_PER_SEC);
    LL_I2L(info->modifyTime, sb->st_mtim.st__tim.tv_sec);
    LL_MUL(info->modifyTime, info->modifyTime, s2us);
    LL_I2L(us, sb->st_mtim.st__tim.tv_nsec / 1000);
    LL_ADD(info->modifyTime, info->modifyTime, us);
    LL_I2L(info->creationTime, sb->st_ctim.st__tim.tv_sec);
    LL_MUL(info->creationTime, info->creationTime, s2us);
    LL_I2L(us, sb->st_ctim.st__tim.tv_nsec / 1000);
    LL_ADD(info->creationTime, info->creationTime, us);
}

static void _MD_set_fileinfo64_times(
    const _MDStat64 *sb,
    PRFileInfo64 *info)
{
    PRInt64 us, s2us;

    LL_I2L(s2us, PR_USEC_PER_SEC);
    LL_I2L(info->modifyTime, sb->st_mtim.st__tim.tv_sec);
    LL_MUL(info->modifyTime, info->modifyTime, s2us);
    LL_I2L(us, sb->st_mtim.st__tim.tv_nsec / 1000);
    LL_ADD(info->modifyTime, info->modifyTime, us);
    LL_I2L(info->creationTime, sb->st_ctim.st__tim.tv_sec);
    LL_MUL(info->creationTime, info->creationTime, s2us);
    LL_I2L(us, sb->st_ctim.st__tim.tv_nsec / 1000);
    LL_ADD(info->creationTime, info->creationTime, us);
}
#elif defined(_PR_STAT_HAS_ST_ATIMESPEC)
/*
** struct stat has st_atimespec, st_mtimespec, and st_ctimespec
** fields of type struct timespec.
*/
#if defined(_PR_TIMESPEC_HAS_TS_SEC)
static void _MD_set_fileinfo_times(
    const struct stat *sb,
    PRFileInfo *info)
{
    PRInt64 us, s2us;

    LL_I2L(s2us, PR_USEC_PER_SEC);
    LL_I2L(info->modifyTime, sb->st_mtimespec.ts_sec);
    LL_MUL(info->modifyTime, info->modifyTime, s2us);
    LL_I2L(us, sb->st_mtimespec.ts_nsec / 1000);
    LL_ADD(info->modifyTime, info->modifyTime, us);
    LL_I2L(info->creationTime, sb->st_ctimespec.ts_sec);
    LL_MUL(info->creationTime, info->creationTime, s2us);
    LL_I2L(us, sb->st_ctimespec.ts_nsec / 1000);
    LL_ADD(info->creationTime, info->creationTime, us);
}

static void _MD_set_fileinfo64_times(
    const _MDStat64 *sb,
    PRFileInfo64 *info)
{
    PRInt64 us, s2us;

    LL_I2L(s2us, PR_USEC_PER_SEC);
    LL_I2L(info->modifyTime, sb->st_mtimespec.ts_sec);
    LL_MUL(info->modifyTime, info->modifyTime, s2us);
    LL_I2L(us, sb->st_mtimespec.ts_nsec / 1000);
    LL_ADD(info->modifyTime, info->modifyTime, us);
    LL_I2L(info->creationTime, sb->st_ctimespec.ts_sec);
    LL_MUL(info->creationTime, info->creationTime, s2us);
    LL_I2L(us, sb->st_ctimespec.ts_nsec / 1000);
    LL_ADD(info->creationTime, info->creationTime, us);
}
#else /* _PR_TIMESPEC_HAS_TS_SEC */
/*
** The POSIX timespec structure has tv_sec and tv_nsec.
*/
static void _MD_set_fileinfo_times(
    const struct stat *sb,
    PRFileInfo *info)
{
    PRInt64 us, s2us;

    LL_I2L(s2us, PR_USEC_PER_SEC);
    LL_I2L(info->modifyTime, sb->st_mtimespec.tv_sec);
    LL_MUL(info->modifyTime, info->modifyTime, s2us);
    LL_I2L(us, sb->st_mtimespec.tv_nsec / 1000);
    LL_ADD(info->modifyTime, info->modifyTime, us);
    LL_I2L(info->creationTime, sb->st_ctimespec.tv_sec);
    LL_MUL(info->creationTime, info->creationTime, s2us);
    LL_I2L(us, sb->st_ctimespec.tv_nsec / 1000);
    LL_ADD(info->creationTime, info->creationTime, us);
}

static void _MD_set_fileinfo64_times(
    const _MDStat64 *sb,
    PRFileInfo64 *info)
{
    PRInt64 us, s2us;

    LL_I2L(s2us, PR_USEC_PER_SEC);
    LL_I2L(info->modifyTime, sb->st_mtimespec.tv_sec);
    LL_MUL(info->modifyTime, info->modifyTime, s2us);
    LL_I2L(us, sb->st_mtimespec.tv_nsec / 1000);
    LL_ADD(info->modifyTime, info->modifyTime, us);
    LL_I2L(info->creationTime, sb->st_ctimespec.tv_sec);
    LL_MUL(info->creationTime, info->creationTime, s2us);
    LL_I2L(us, sb->st_ctimespec.tv_nsec / 1000);
    LL_ADD(info->creationTime, info->creationTime, us);
}
#endif /* _PR_TIMESPEC_HAS_TS_SEC */
#elif defined(_PR_STAT_HAS_ONLY_ST_ATIME)
/*
** struct stat only has st_atime, st_mtime, and st_ctime fields
** of type time_t (one-second resolution).
*/
static void _MD_set_fileinfo_times(
    const struct stat *sb,
    PRFileInfo *info)
{
    PRInt64 s, s2us;
    LL_I2L(s2us, PR_USEC_PER_SEC);
    LL_I2L(s, sb->st_mtime);
    LL_MUL(s, s, s2us);
    info->modifyTime = s;
    LL_I2L(s, sb->st_ctime);
    LL_MUL(s, s, s2us);
    info->creationTime = s;
}

static void _MD_set_fileinfo64_times(
    const _MDStat64 *sb,
    PRFileInfo64 *info)
{
    PRInt64 s, s2us;
    LL_I2L(s2us, PR_USEC_PER_SEC);
    LL_I2L(s, sb->st_mtime);
    LL_MUL(s, s, s2us);
    info->modifyTime = s;
    LL_I2L(s, sb->st_ctime);
    LL_MUL(s, s, s2us);
    info->creationTime = s;
}
#else
#error "I don't know yet"
#endif
2531 | |
2532 static int _MD_convert_stat_to_fileinfo( | |
2533 const struct stat *sb, | |
2534 PRFileInfo *info) | |
2535 { | |
2536 if (S_IFREG & sb->st_mode) | |
2537 info->type = PR_FILE_FILE; | |
2538 else if (S_IFDIR & sb->st_mode) | |
2539 info->type = PR_FILE_DIRECTORY; | |
2540 else | |
2541 info->type = PR_FILE_OTHER; | |
2542 | |
2543 #if defined(_PR_HAVE_LARGE_OFF_T) | |
2544 if (0x7fffffffL < sb->st_size) | |
2545 { | |
2546 PR_SetError(PR_FILE_TOO_BIG_ERROR, 0); | |
2547 return -1; | |
2548 } | |
2549 #endif /* defined(_PR_HAVE_LARGE_OFF_T) */ | |
2550 info->size = sb->st_size; | |
2551 | |
2552 _MD_set_fileinfo_times(sb, info); | |
2553 return 0; | |
2554 } /* _MD_convert_stat_to_fileinfo */ | |
2555 | |
2556 static int _MD_convert_stat64_to_fileinfo64( | |
2557 const _MDStat64 *sb, | |
2558 PRFileInfo64 *info) | |
2559 { | |
2560 if (S_IFREG & sb->st_mode) | |
2561 info->type = PR_FILE_FILE; | |
2562 else if (S_IFDIR & sb->st_mode) | |
2563 info->type = PR_FILE_DIRECTORY; | |
2564 else | |
2565 info->type = PR_FILE_OTHER; | |
2566 | |
2567 LL_I2L(info->size, sb->st_size); | |
2568 | |
2569 _MD_set_fileinfo64_times(sb, info); | |
2570 return 0; | |
2571 } /* _MD_convert_stat64_to_fileinfo64 */ | |
2572 | |
2573 PRInt32 _MD_getfileinfo(const char *fn, PRFileInfo *info) | |
2574 { | |
2575 PRInt32 rv; | |
2576 struct stat sb; | |
2577 | |
2578 rv = stat(fn, &sb); | |
2579 if (rv < 0) | |
2580 _PR_MD_MAP_STAT_ERROR(_MD_ERRNO()); | |
2581 else if (NULL != info) | |
2582 rv = _MD_convert_stat_to_fileinfo(&sb, info); | |
2583 return rv; | |
2584 } | |
2585 | |
/*
** stat() a path via the 64-bit vector and fill *info.  A NULL info
** turns the call into a pure existence/accessibility check.
*/
PRInt32 _MD_getfileinfo64(const char *fn, PRFileInfo64 *info)
{
    _MDStat64 sb;
    PRInt32 rv = _md_iovector._stat64(fn, &sb);
    if (rv < 0)
        _PR_MD_MAP_STAT_ERROR(_MD_ERRNO());
    else if (NULL != info)
        rv = _MD_convert_stat64_to_fileinfo64(&sb, info);
    return rv;
}
2596 | |
/*
** fstat() an open descriptor and fill *info (NULL info: validity
** check only).
*/
PRInt32 _MD_getopenfileinfo(const PRFileDesc *fd, PRFileInfo *info)
{
    struct stat sb;
    PRInt32 rv = fstat(fd->secret->md.osfd, &sb);
    if (rv < 0)
        _PR_MD_MAP_FSTAT_ERROR(_MD_ERRNO());
    else if (NULL != info)
        rv = _MD_convert_stat_to_fileinfo(&sb, info);
    return rv;
}
2607 | |
/*
** fstat() an open descriptor via the 64-bit vector and fill *info
** (NULL info: validity check only).
*/
PRInt32 _MD_getopenfileinfo64(const PRFileDesc *fd, PRFileInfo64 *info)
{
    _MDStat64 sb;
    PRInt32 rv = _md_iovector._fstat64(fd->secret->md.osfd, &sb);
    if (rv < 0)
        _PR_MD_MAP_FSTAT_ERROR(_MD_ERRNO());
    else if (NULL != info)
        rv = _MD_convert_stat64_to_fileinfo64(&sb, info);
    return rv;
}
2618 | |
/*
 * Table of 64-bit-capable file primitives; fully populated by
 * _PR_InitIOV during _PR_UnixInit.
 *
 * _md_iovector._open64 must be initialized to 'open' so that _PR_InitLog can
 * open the log file during NSPR initialization, before _md_iovector is
 * initialized by _PR_MD_FINAL_INIT. This means the log file cannot be a
 * large file on some platforms.
 */
#ifdef SYMBIAN
struct _MD_IOVector _md_iovector; /* Will crash if NSPR_LOG_FILE is set. */
#else
struct _MD_IOVector _md_iovector = { open };
#endif
2630 | |
2631 /* | |
2632 ** These implementations are to emulate large file routines on systems that | |
2633 ** don't have them. Their goal is to check in case overflow occurs. Otherwise | |
2634 ** they will just operate as normal using 32-bit file routines. | |
2635 ** | |
2636 ** The checking might be pre- or post-op, depending on the semantics. | |
2637 */ | |
2638 | |
#if defined(SOLARIS2_5)

/*
** fstat64 emulation for Solaris 2.5: call the 32-bit fstat and widen
** the few fields NSPR actually consumes into _MDStat64.
*/
static PRIntn _MD_solaris25_fstat64(PRIntn osfd, _MDStat64 *buf)
{
    PRInt32 rv;
    struct stat sb;

    rv = fstat(osfd, &sb);
    if (rv >= 0)
    {
        /*
        ** I'm only copying the fields that are immediately needed.
        ** If somebody else calls this function, some of the fields
        ** may not be defined.
        */
        (void)memset(buf, 0, sizeof(_MDStat64));
        buf->st_mode = sb.st_mode;
        buf->st_ctim = sb.st_ctim;
        buf->st_mtim = sb.st_mtim;
        buf->st_size = sb.st_size;
    }
    return rv;
} /* _MD_solaris25_fstat64 */

/*
** stat64 emulation for Solaris 2.5; see _MD_solaris25_fstat64.
*/
static PRIntn _MD_solaris25_stat64(const char *fn, _MDStat64 *buf)
{
    PRInt32 rv;
    struct stat sb;

    rv = stat(fn, &sb);
    if (rv >= 0)
    {
        /*
        ** I'm only copying the fields that are immediately needed.
        ** If somebody else calls this function, some of the fields
        ** may not be defined.
        */
        (void)memset(buf, 0, sizeof(_MDStat64));
        buf->st_mode = sb.st_mode;
        buf->st_ctim = sb.st_ctim;
        buf->st_mtim = sb.st_mtim;
        buf->st_size = sb.st_size;
    }
    return rv;
} /* _MD_solaris25_stat64 */
#endif /* defined(SOLARIS2_5) */
2685 | |
#if defined(_PR_NO_LARGE_FILES) || defined(SOLARIS2_5)

/*
** 64-bit lseek emulation on top of the 32-bit lseek.  Offsets beyond
** 2^31-1 cannot be represented by off_t here, so they fail with EFBIG
** instead of silently truncating.
*/
static PROffset64 _MD_Unix_lseek64(PRIntn osfd, PROffset64 offset, PRIntn whence)
{
    PRUint64 maxoff;
    PROffset64 rv = minus_one;
    LL_I2L(maxoff, 0x7fffffff);
    if (LL_CMP(offset, <=, maxoff))
    {
        off_t off;
        LL_L2I(off, offset);
        LL_I2L(rv, lseek(osfd, off, whence));
    }
    else errno = EFBIG;  /* we can't go there */
    return rv;
} /* _MD_Unix_lseek64 */

/*
** mmap64 stand-in for platforms without large-file mmap: always fails
** with PR_FILE_TOO_BIG_ERROR.
*/
static void* _MD_Unix_mmap64(
    void *addr, PRSize len, PRIntn prot, PRIntn flags,
    PRIntn fildes, PRInt64 offset)
{
    PR_SetError(PR_FILE_TOO_BIG_ERROR, 0);
    return NULL;
} /* _MD_Unix_mmap64 */
#endif /* defined(_PR_NO_LARGE_FILES) || defined(SOLARIS2_5) */
2711 | |
/* Android doesn't have mmap64. */
#if defined(ANDROID)
extern void *__mmap2(void *, size_t, int, int, int, size_t);

#define ANDROID_PAGE_SIZE 4096

/*
** Emulate mmap64 with the kernel's mmap2 syscall, which takes the file
** offset in 4096-byte pages rather than bytes.  The offset must be
** page aligned, otherwise the page count would be inexact.
*/
static void *
mmap64(void *addr, size_t len, int prot, int flags, int fd, loff_t offset)
{
    if (offset & (ANDROID_PAGE_SIZE - 1)) {
        errno = EINVAL;
        return MAP_FAILED;
    }
    return __mmap2(addr, len, prot, flags, fd, offset / ANDROID_PAGE_SIZE);
}
#endif
2728 | |
#if defined(OSF1) && defined(__GNUC__)

/*
 * On OSF1 V5.0A, <sys/stat.h> defines stat and fstat as
 * macros when compiled under gcc, so it is rather tricky to
 * take the addresses of the real functions the macros expand
 * to. A simple solution is to define forwarder functions
 * and take the addresses of the forwarder functions instead.
 */

static int stat_forwarder(const char *path, struct stat *buffer)
{
    return stat(path, buffer);
}

static int fstat_forwarder(int filedes, struct stat *buffer)
{
    return fstat(filedes, buffer);
}

#endif
2750 | |
/*
** Populate _md_iovector with the best available 64-bit file functions:
**  - SOLARIS2_5: probe libc at runtime for the large-file entry points
**    and fall back to 32-bit emulations if they are absent.
**  - _PR_NO_LARGE_FILES: 32-bit functions plus the overflow-checking
**    emulations above for lseek64/mmap64.
**  - _PR_HAVE_OFF64_T: native *64 functions.
**  - _PR_HAVE_LARGE_OFF_T: off_t is already 64 bits, so the plain
**    functions suffice.
*/
static void _PR_InitIOV(void)
{
#if defined(SOLARIS2_5)
    PRLibrary *lib;
    void *open64_func;

    open64_func = PR_FindSymbolAndLibrary("open64", &lib);
    if (NULL != open64_func)
    {
        PR_ASSERT(NULL != lib);
        _md_iovector._open64 = (_MD_Open64)open64_func;
        _md_iovector._mmap64 = (_MD_Mmap64)PR_FindSymbol(lib, "mmap64");
        _md_iovector._fstat64 = (_MD_Fstat64)PR_FindSymbol(lib, "fstat64");
        _md_iovector._stat64 = (_MD_Stat64)PR_FindSymbol(lib, "stat64");
        _md_iovector._lseek64 = (_MD_Lseek64)PR_FindSymbol(lib, "lseek64");
        (void)PR_UnloadLibrary(lib);
    }
    else
    {
        _md_iovector._open64 = open;
        _md_iovector._mmap64 = _MD_Unix_mmap64;
        _md_iovector._fstat64 = _MD_solaris25_fstat64;
        _md_iovector._stat64 = _MD_solaris25_stat64;
        _md_iovector._lseek64 = _MD_Unix_lseek64;
    }
#elif defined(_PR_NO_LARGE_FILES)
    _md_iovector._open64 = open;
    _md_iovector._mmap64 = _MD_Unix_mmap64;
    _md_iovector._fstat64 = fstat;
    _md_iovector._stat64 = stat;
    _md_iovector._lseek64 = _MD_Unix_lseek64;
#elif defined(_PR_HAVE_OFF64_T)
#if defined(IRIX5_3) || defined(ANDROID)
    /*
     * Android doesn't have open64.  We pass the O_LARGEFILE flag to open
     * in _MD_open.
     */
    _md_iovector._open64 = open;
#else
    _md_iovector._open64 = open64;
#endif
    _md_iovector._mmap64 = mmap64;
    _md_iovector._fstat64 = fstat64;
    _md_iovector._stat64 = stat64;
    _md_iovector._lseek64 = lseek64;
#elif defined(_PR_HAVE_LARGE_OFF_T)
    _md_iovector._open64 = open;
    _md_iovector._mmap64 = mmap;
#if defined(OSF1) && defined(__GNUC__)
    /* stat/fstat are macros under gcc on OSF1; use the forwarders. */
    _md_iovector._fstat64 = fstat_forwarder;
    _md_iovector._stat64 = stat_forwarder;
#else
    _md_iovector._fstat64 = fstat;
    _md_iovector._stat64 = stat;
#endif
    _md_iovector._lseek64 = lseek;
#else
#error "I don't know yet"
#endif
    LL_I2L(minus_one, -1); /* sentinel returned by the 64-bit wrappers */
} /* _PR_InitIOV */
2812 | |
/*
** Process-wide Unix initialization, called from PR_Init:
**  - builds the SIGALRM mask and installs optional debugging signal
**    handlers for the user-level threading configuration,
**  - ignores SIGPIPE so socket writes report errors instead of
**    terminating the process,
**  - creates the rename lock and the Xfe monitor,
**  - installs the 64-bit file I/O vector (_PR_InitIOV).
*/
void _PR_UnixInit(void)
{
    struct sigaction sigact;
    int rv;

    sigemptyset(&timer_set);

#if !defined(_PR_PTHREADS)

    sigaddset(&timer_set, SIGALRM);
    sigemptyset(&empty_set);
    intr_timeout_ticks =
            PR_SecondsToInterval(_PR_INTERRUPT_CHECK_INTERVAL_SECS);

#if defined(SOLARIS) || defined(IRIX)

    /* Optional debugging handlers (report the signal, then pause() so
     * a debugger can attach); enabled via environment variables. */
    if (getenv("NSPR_SIGSEGV_HANDLE")) {
        sigact.sa_handler = sigsegvhandler;
        sigact.sa_flags = 0;
        sigact.sa_mask = timer_set;
        sigaction(SIGSEGV, &sigact, 0);
    }

    if (getenv("NSPR_SIGABRT_HANDLE")) {
        sigact.sa_handler = sigaborthandler;
        sigact.sa_flags = 0;
        sigact.sa_mask = timer_set;
        sigaction(SIGABRT, &sigact, 0);
    }

    if (getenv("NSPR_SIGBUS_HANDLE")) {
        sigact.sa_handler = sigbushandler;
        sigact.sa_flags = 0;
        sigact.sa_mask = timer_set;
        sigaction(SIGBUS, &sigact, 0);
    }

#endif
#endif  /* !defined(_PR_PTHREADS) */

    /*
     * Under HP-UX DCE threads, sigaction() installs a per-thread
     * handler, so we use sigvector() to install a process-wide
     * handler.
     */
#if defined(HPUX) && defined(_PR_DCETHREADS)
    {
        struct sigvec vec;

        vec.sv_handler = SIG_IGN;
        vec.sv_mask = 0;
        vec.sv_flags = 0;
        rv = sigvector(SIGPIPE, &vec, NULL);
        PR_ASSERT(0 == rv);
    }
#else
    sigact.sa_handler = SIG_IGN;
    sigemptyset(&sigact.sa_mask);
    sigact.sa_flags = 0;
    rv = sigaction(SIGPIPE, &sigact, 0);
    PR_ASSERT(0 == rv);
#endif /* HPUX && _PR_DCETHREADS */

    _pr_rename_lock = PR_NewLock();
    PR_ASSERT(NULL != _pr_rename_lock);
    _pr_Xfe_mon = PR_NewMonitor();
    PR_ASSERT(NULL != _pr_Xfe_mon);

    _PR_InitIOV();  /* one last hack */
}
2883 | |
/*
** Release the process-wide lock and monitor created by _PR_UnixInit.
** Pointers are NULLed after destruction so a second call is harmless.
*/
void _PR_UnixCleanup(void)
{
    if (_pr_rename_lock) {
        PR_DestroyLock(_pr_rename_lock);
        _pr_rename_lock = NULL;
    }
    if (_pr_Xfe_mon) {
        PR_DestroyMonitor(_pr_Xfe_mon);
        _pr_Xfe_mon = NULL;
    }
}
2895 | |
2896 #if !defined(_PR_PTHREADS) | |
2897 | |
2898 /* | |
2899 * Variables used by the GC code, initialized in _MD_InitSegs(). | |
2900 */ | |
2901 static PRInt32 _pr_zero_fd = -1; | |
2902 static PRLock *_pr_md_lock = NULL; | |
2903 | |
2904 /* | |
2905 * _MD_InitSegs -- | |
2906 * | |
2907 * This is Unix's version of _PR_MD_INIT_SEGS(), which is | |
2908 * called by _PR_InitSegs(), which in turn is called by | |
2909 * PR_Init(). | |
2910 */ | |
2911 void _MD_InitSegs(void) | |
2912 { | |
2913 #ifdef DEBUG | |
2914 /* | |
2915 ** Disable using mmap(2) if NSPR_NO_MMAP is set | |
2916 */ | |
2917 if (getenv("NSPR_NO_MMAP")) { | |
2918 _pr_zero_fd = -2; | |
2919 return; | |
2920 } | |
2921 #endif | |
2922 _pr_zero_fd = open("/dev/zero",O_RDWR , 0); | |
2923 /* Prevent the fd from being inherited by child processes */ | |
2924 fcntl(_pr_zero_fd, F_SETFD, FD_CLOEXEC); | |
2925 _pr_md_lock = PR_NewLock(); | |
2926 } | |
2927 | |
/*
** Allocate a 'size'-byte memory segment.  Preferred backing store is a
** /dev/zero mapping at a monotonically advancing hint address; if mmap
** is unavailable (_pr_zero_fd < 0) or the mapping fails, the segment
** is carved from the heap instead.  Guarded by _pr_md_lock.
** Returns PR_SUCCESS or PR_FAILURE (heap exhaustion).
*/
PRStatus _MD_AllocSegment(PRSegment *seg, PRUint32 size, void *vaddr)
{
    /* Next mapping hint; only advances, protected by _pr_md_lock. */
    static char *lastaddr = (char*) _PR_STACK_VMBASE;
    PRStatus retval = PR_SUCCESS;
    int prot;
    void *rv;

    PR_ASSERT(seg != 0);
    PR_ASSERT(size != 0);

    PR_Lock(_pr_md_lock);
    if (_pr_zero_fd < 0) {
from_heap:
        /* Heap fallback; also reached by goto when mmap below fails. */
        seg->vaddr = PR_MALLOC(size);
        if (!seg->vaddr) {
            retval = PR_FAILURE;
        }
        else {
            seg->size = size;
        }
        goto exit;
    }

    prot = PROT_READ|PROT_WRITE;
    /*
     * On Alpha Linux, the user-level thread stack needs
     * to be made executable because longjmp/signal seem
     * to put machine instructions on the stack.
     */
#if defined(LINUX) && defined(__alpha)
    prot |= PROT_EXEC;
#endif
    rv = mmap((vaddr != 0) ? vaddr : lastaddr, size, prot,
        _MD_MMAP_FLAGS,
        _pr_zero_fd, 0);
    if (rv == (void*)-1) {
        goto from_heap;
    }
    lastaddr += size;
    seg->vaddr = rv;
    seg->size = size;
    seg->flags = _PR_SEG_VM; /* marks the segment for munmap in _MD_FreeSegment */

exit:
    PR_Unlock(_pr_md_lock);
    return retval;
}
2975 | |
2976 void _MD_FreeSegment(PRSegment *seg) | |
2977 { | |
2978 if (seg->flags & _PR_SEG_VM) | |
2979 (void) munmap(seg->vaddr, seg->size); | |
2980 else | |
2981 PR_DELETE(seg->vaddr); | |
2982 } | |
2983 | |
2984 #endif /* _PR_PTHREADS */ | |
2985 | |
2986 /* | |
2987 *----------------------------------------------------------------------- | |
2988 * | |
2989 * PR_Now -- | |
2990 * | |
2991 * Returns the current time in microseconds since the epoch. | |
2992 * The epoch is midnight January 1, 1970 GMT. | |
2993 * The implementation is machine dependent. This is the Unix | |
2994 * implementation. | |
2995 * Cf. time_t time(time_t *tp) | |
2996 * | |
2997 *----------------------------------------------------------------------- | |
2998 */ | |
2999 | |
3000 PR_IMPLEMENT(PRTime) | |
3001 PR_Now(void) | |
3002 { | |
3003 struct timeval tv; | |
3004 PRInt64 s, us, s2us; | |
3005 | |
3006 GETTIMEOFDAY(&tv); | |
3007 LL_I2L(s2us, PR_USEC_PER_SEC); | |
3008 LL_I2L(s, tv.tv_sec); | |
3009 LL_I2L(us, tv.tv_usec); | |
3010 LL_MUL(s, s, s2us); | |
3011 LL_ADD(s, s, us); | |
3012 return s; | |
3013 } | |
3014 | |
#if defined(_MD_INTERVAL_USE_GTOD)
/*
 * This version of interval times is based on the time of day
 * capability offered by the system. This isn't valid for two reasons:
 * 1) The time of day is neither linear nor monotonically increasing
 * 2) The units here are milliseconds. That's not appropriate for our use.
 */
PRIntervalTime _PR_UNIX_GetInterval()
{
    struct timeval time;
    PRIntervalTime ticks;

    (void)GETTIMEOFDAY(&time);  /* fallacy of course (see above) */
    ticks = (PRUint32)time.tv_sec * PR_MSEC_PER_SEC;  /* that's in milliseconds */
    ticks += (PRUint32)time.tv_usec / PR_USEC_PER_MSEC;  /* so's that */
    return ticks;
}  /* _PR_UNIX_GetInterval */

/* One interval tick is one millisecond. */
PRIntervalTime _PR_UNIX_TicksPerSecond()
{
    return 1000;  /* this needs some work :) */
}
#endif
3038 | |
#if defined(HAVE_CLOCK_MONOTONIC)
/*
** Millisecond interval timer based on CLOCK_MONOTONIC, which is not
** affected by wall-clock adjustments.  Aborts if the clock is
** unavailable, since interval timing cannot proceed without it.
*/
PRIntervalTime _PR_UNIX_GetInterval2()
{
    struct timespec time;
    PRIntervalTime ticks;

    if (clock_gettime(CLOCK_MONOTONIC, &time) != 0) {
        fprintf(stderr, "clock_gettime failed: %d\n", errno);
        abort();
    }

    ticks = (PRUint32)time.tv_sec * PR_MSEC_PER_SEC;
    ticks += (PRUint32)time.tv_nsec / PR_NSEC_PER_MSEC;
    return ticks;
}

/* One interval tick is one millisecond. */
PRIntervalTime _PR_UNIX_TicksPerSecond2()
{
    return 1000;
}
#endif
3060 | |
3061 #if !defined(_PR_PTHREADS) | |
3062 /* | |
3063 * Wait for I/O on multiple descriptors. | |
3064 * | |
3065 * Return 0 if timed out, return -1 if interrupted, | |
3066 * else return the number of ready descriptors. | |
3067 */ | |
PRInt32 _PR_WaitForMultipleFDs(
    _PRUnixPollDesc *unixpds,
    PRInt32 pdcnt,
    PRIntervalTime timeout)
{
    PRPollQueue pq;
    PRIntn is;
    PRInt32 rv;
    _PRCPU *io_cpu;
    _PRUnixPollDesc *unixpd, *eunixpd;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    PR_ASSERT(!(me->flags & _PR_IDLE_THREAD));

    /* Bail out immediately if this thread was already interrupted. */
    if (_PR_PENDING_INTERRUPT(me)) {
        me->flags &= ~_PR_INTERRUPT;
        PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        return -1;
    }

    /* Build a poll-queue entry describing the descriptors we wait on. */
    pq.pds = unixpds;
    pq.npds = pdcnt;

    /*
     * Lock ordering: interrupts off, then IOQ lock, then thread lock.
     * Unlock happens in the reverse order below.
     */
    _PR_INTSOFF(is);
    _PR_MD_IOQ_LOCK();
    _PR_THREAD_LOCK(me);

    pq.thr = me;
    io_cpu = me->cpu;       /* remember which CPU queued the request */
    pq.on_ioq = PR_TRUE;
    pq.timeout = timeout;
    _PR_ADD_TO_IOQ(pq, me->cpu);

#if !defined(_PR_USE_POLL)
    /*
     * select()-based path: merge each descriptor's interest flags into
     * the per-CPU fd_sets and bump the per-fd reference counts so other
     * waiters on the same fd are tracked.
     */
    eunixpd = unixpds + pdcnt;
    for (unixpd = unixpds; unixpd < eunixpd; unixpd++) {
        PRInt32 osfd = unixpd->osfd;
        if (unixpd->in_flags & _PR_UNIX_POLL_READ) {
            FD_SET(osfd, &_PR_FD_READ_SET(me->cpu));
            _PR_FD_READ_CNT(me->cpu)[osfd]++;
        }
        if (unixpd->in_flags & _PR_UNIX_POLL_WRITE) {
            FD_SET(osfd, &_PR_FD_WRITE_SET(me->cpu));
            (_PR_FD_WRITE_CNT(me->cpu))[osfd]++;
        }
        if (unixpd->in_flags & _PR_UNIX_POLL_EXCEPT) {
            FD_SET(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
            (_PR_FD_EXCEPTION_CNT(me->cpu))[osfd]++;
        }
        /* Track the highest fd for select()'s nfds argument. */
        if (osfd > _PR_IOQ_MAX_OSFD(me->cpu)) {
            _PR_IOQ_MAX_OSFD(me->cpu) = osfd;
        }
    }
#endif  /* !defined(_PR_USE_POLL) */

    /* Shrink the CPU-wide IO timeout if ours is sooner. */
    if (_PR_IOQ_TIMEOUT(me->cpu) > timeout) {
        _PR_IOQ_TIMEOUT(me->cpu) = timeout;
    }

    _PR_IOQ_OSFD_CNT(me->cpu) += pdcnt;

    /* Park this thread on the sleep queue and go to sleep in _PR_MD_WAIT. */
    _PR_SLEEPQ_LOCK(me->cpu);
    _PR_ADD_SLEEPQ(me, timeout);
    me->state = _PR_IO_WAIT;
    me->io_pending = PR_TRUE;
    me->io_suspended = PR_FALSE;
    _PR_SLEEPQ_UNLOCK(me->cpu);
    _PR_THREAD_UNLOCK(me);
    _PR_MD_IOQ_UNLOCK();

    _PR_MD_WAIT(me, timeout);

    me->io_pending = PR_FALSE;
    me->io_suspended = PR_FALSE;

    /*
     * This thread should run on the same cpu on which it was blocked; when
     * the IO request times out the fd sets and fd counts for the
     * cpu are updated below.
     */
    PR_ASSERT(me->cpu == io_cpu);

    /*
    ** If we timed out the pollq might still be on the ioq. Remove it
    ** before continuing.
    */
    if (pq.on_ioq) {
        _PR_MD_IOQ_LOCK();
        /*
         * Need to check pq.on_ioq again (it may have been cleared by the
         * IO thread between our first test and taking the IOQ lock).
         */
        if (pq.on_ioq) {
            PR_REMOVE_LINK(&pq.links);
#ifndef _PR_USE_POLL
            /* Undo the fd_set/refcount bookkeeping done before sleeping. */
            eunixpd = unixpds + pdcnt;
            for (unixpd = unixpds; unixpd < eunixpd; unixpd++) {
                PRInt32 osfd = unixpd->osfd;
                PRInt16 in_flags = unixpd->in_flags;

                if (in_flags & _PR_UNIX_POLL_READ) {
                    if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0)
                        FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu));
                }
                if (in_flags & _PR_UNIX_POLL_WRITE) {
                    if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0)
                        FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu));
                }
                if (in_flags & _PR_UNIX_POLL_EXCEPT) {
                    if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0)
                        FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
                }
            }
#endif  /* _PR_USE_POLL */
            PR_ASSERT(pq.npds == pdcnt);
            _PR_IOQ_OSFD_CNT(me->cpu) -= pdcnt;
            PR_ASSERT(_PR_IOQ_OSFD_CNT(me->cpu) >= 0);
        }
        _PR_MD_IOQ_UNLOCK();
    }
    /* XXX Should we use _PR_FAST_INTSON or _PR_INTSON? */
    if (1 == pdcnt) {
        _PR_FAST_INTSON(is);
    } else {
        _PR_INTSON(is);
    }

    /* An interrupt may have been the reason we woke up. */
    if (_PR_PENDING_INTERRUPT(me)) {
        me->flags &= ~_PR_INTERRUPT;
        PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        return -1;
    }

    rv = 0;
    if (pq.on_ioq == PR_FALSE) {
        /* Count the number of ready descriptors */
        while (--pdcnt >= 0) {
            if (unixpds->out_flags != 0) {
                rv++;
            }
            unixpds++;
        }
    }

    return rv;
}
3213 | |
3214 /* | |
3215 * Unblock threads waiting for I/O | |
3216 * used when interrupting threads | |
3217 * | |
3218 * NOTE: The thread lock should held when this function is called. | |
3219 * On return, the thread lock is released. | |
3220 */ | |
void _PR_Unblock_IO_Wait(PRThread *thr)
{
    int pri = thr->priority;
    _PRCPU *cpu = thr->cpu;

    /*
     * GLOBAL threads wakeup periodically to check for interrupt
     */
    if (_PR_IS_NATIVE_THREAD(thr)) {
        _PR_THREAD_UNLOCK(thr);
        return;
    }

    PR_ASSERT(thr->flags & (_PR_ON_SLEEPQ | _PR_ON_PAUSEQ));
    /* Remove the thread from its CPU's sleep queue... */
    _PR_SLEEPQ_LOCK(cpu);
    _PR_DEL_SLEEPQ(thr, PR_TRUE);
    _PR_SLEEPQ_UNLOCK(cpu);

    PR_ASSERT(!(thr->flags & _PR_IDLE_THREAD));
    /* ...and make it runnable again at its previous priority. */
    thr->state = _PR_RUNNABLE;
    _PR_RUNQ_LOCK(cpu);
    _PR_ADD_RUNQ(thr, cpu, pri);
    _PR_RUNQ_UNLOCK(cpu);
    /* The caller entered with the thread lock held; release it here. */
    _PR_THREAD_UNLOCK(thr);
    _PR_MD_WAKEUP_WAITER(thr);
}
3247 #endif /* !defined(_PR_PTHREADS) */ | |
3248 | |
3249 /* | |
3250 * When a nonblocking connect has completed, determine whether it | |
3251 * succeeded or failed, and if it failed, what the error code is. | |
3252 * | |
3253 * The function returns the error code. An error code of 0 means | |
3254 * that the nonblocking connect succeeded. | |
3255 */ | |
3256 | |
int _MD_unix_get_nonblocking_connect_error(int osfd)
{
#if defined(NTO)
    /* Neutrino does not support the SO_ERROR socket option */
    PRInt32 rv;
    PRNetAddr addr;
    _PRSockLen_t addrlen = sizeof(addr);

    /* Test to see if we are using the Tiny TCP/IP Stack or the Full one. */
    struct statvfs superblock;
    rv = fstatvfs(osfd, &superblock);
    if (rv == 0) {
        if (strcmp(superblock.f_basetype, "ttcpip") == 0) {
            /* Using the Tiny Stack! */
            /* A connected socket has a peer; ENOTCONN means the connect
             * did not complete. */
            rv = getpeername(osfd, (struct sockaddr *) &addr,
                    (_PRSockLen_t *) &addrlen);
            if (rv == -1) {
                int errno_copy = errno;    /* make a copy so I don't
                                            * accidentally reset */

                if (errno_copy == ENOTCONN) {
                    struct stat StatInfo;
                    rv = fstat(osfd, &StatInfo);
                    if (rv == 0) {
                        time_t current_time = time(NULL);

                        /*
                         * this is a real hack, can't explain why it
                         * works it just does
                         * (uses the socket's access time to guess whether
                         * the failure was fast [refused] or slow [timeout])
                         */
                        if (abs(current_time - StatInfo.st_atime) < 5) {
                            return ECONNREFUSED;
                        } else {
                            return ETIMEDOUT;
                        }
                    } else {
                        return ECONNREFUSED;
                    }
                } else {
                    return errno_copy;
                }
            } else {
                /* No Error */
                return 0;
            }
        } else {
            /* Have the FULL Stack which supports SO_ERROR */
            /* Hasn't been written yet, never been tested! */
            /* Jerry.Kirk@Nexwarecorp.com */

            int err;
            _PRSockLen_t optlen = sizeof(err);

            if (getsockopt(osfd, SOL_SOCKET, SO_ERROR,
                    (char *) &err, &optlen) == -1) {
                return errno;
            } else {
                return err;
            }
        }
    } else {
        return ECONNREFUSED;
    }
#elif defined(UNIXWARE)
    /*
     * getsockopt() fails with EPIPE, so use getmsg() instead.
     */

    int rv;
    int flags = 0;
    rv = getmsg(osfd, NULL, NULL, &flags);
    PR_ASSERT(-1 == rv || 0 == rv);
    if (-1 == rv && errno != EAGAIN && errno != EWOULDBLOCK) {
        return errno;
    }
    return 0;    /* no error */
#else
    /* Standard Unix path: SO_ERROR returns (and clears) the pending
     * socket error; 0 means the nonblocking connect succeeded. */
    int err;
    _PRSockLen_t optlen = sizeof(err);
    if (getsockopt(osfd, SOL_SOCKET, SO_ERROR, (char *) &err, &optlen) == -1) {
        return errno;
    } else {
        return err;
    }
#endif
}
3343 | |
3344 /************************************************************************/ | |
3345 | |
3346 /* | |
3347 ** Special hacks for xlib. Xlib/Xt/Xm is not re-entrant nor is it thread | |
3348 ** safe. Unfortunately, neither is mozilla. To make these programs work | |
3349 ** in a pre-emptive threaded environment, we need to use a lock. | |
3350 */ | |
3351 | |
/* Acquire the global Xlib serialization monitor. */
void PR_XLock(void)
{
    PR_EnterMonitor(_pr_Xfe_mon);
}
3356 | |
/* Release the global Xlib serialization monitor. */
void PR_XUnlock(void)
{
    PR_ExitMonitor(_pr_Xfe_mon);
}
3361 | |
3362 PRBool PR_XIsLocked(void) | |
3363 { | |
3364 return (PR_InMonitor(_pr_Xfe_mon)) ? PR_TRUE : PR_FALSE; | |
3365 } | |
3366 | |
3367 void PR_XWait(int ms) | |
3368 { | |
3369 PR_Wait(_pr_Xfe_mon, PR_MillisecondsToInterval(ms)); | |
3370 } | |
3371 | |
/* Wake one thread waiting in PR_XWait(). */
void PR_XNotify(void)
{
    PR_Notify(_pr_Xfe_mon);
}
3376 | |
/* Wake all threads waiting in PR_XWait(). */
void PR_XNotifyAll(void)
{
    PR_NotifyAll(_pr_Xfe_mon);
}
3381 | |
3382 #if defined(HAVE_FCNTL_FILE_LOCKING) | |
3383 | |
3384 PRStatus | |
3385 _MD_LockFile(PRInt32 f) | |
3386 { | |
3387 PRInt32 rv; | |
3388 struct flock arg; | |
3389 | |
3390 arg.l_type = F_WRLCK; | |
3391 arg.l_whence = SEEK_SET; | |
3392 arg.l_start = 0; | |
3393 arg.l_len = 0; /* until EOF */ | |
3394 rv = fcntl(f, F_SETLKW, &arg); | |
3395 if (rv == 0) | |
3396 return PR_SUCCESS; | |
3397 _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO()); | |
3398 return PR_FAILURE; | |
3399 } | |
3400 | |
3401 PRStatus | |
3402 _MD_TLockFile(PRInt32 f) | |
3403 { | |
3404 PRInt32 rv; | |
3405 struct flock arg; | |
3406 | |
3407 arg.l_type = F_WRLCK; | |
3408 arg.l_whence = SEEK_SET; | |
3409 arg.l_start = 0; | |
3410 arg.l_len = 0; /* until EOF */ | |
3411 rv = fcntl(f, F_SETLK, &arg); | |
3412 if (rv == 0) | |
3413 return PR_SUCCESS; | |
3414 _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO()); | |
3415 return PR_FAILURE; | |
3416 } | |
3417 | |
3418 PRStatus | |
3419 _MD_UnlockFile(PRInt32 f) | |
3420 { | |
3421 PRInt32 rv; | |
3422 struct flock arg; | |
3423 | |
3424 arg.l_type = F_UNLCK; | |
3425 arg.l_whence = SEEK_SET; | |
3426 arg.l_start = 0; | |
3427 arg.l_len = 0; /* until EOF */ | |
3428 rv = fcntl(f, F_SETLK, &arg); | |
3429 if (rv == 0) | |
3430 return PR_SUCCESS; | |
3431 _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO()); | |
3432 return PR_FAILURE; | |
3433 } | |
3434 | |
3435 #elif defined(HAVE_BSD_FLOCK) | |
3436 | |
3437 #include <sys/file.h> | |
3438 | |
3439 PRStatus | |
3440 _MD_LockFile(PRInt32 f) | |
3441 { | |
3442 PRInt32 rv; | |
3443 rv = flock(f, LOCK_EX); | |
3444 if (rv == 0) | |
3445 return PR_SUCCESS; | |
3446 _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO()); | |
3447 return PR_FAILURE; | |
3448 } | |
3449 | |
3450 PRStatus | |
3451 _MD_TLockFile(PRInt32 f) | |
3452 { | |
3453 PRInt32 rv; | |
3454 rv = flock(f, LOCK_EX|LOCK_NB); | |
3455 if (rv == 0) | |
3456 return PR_SUCCESS; | |
3457 _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO()); | |
3458 return PR_FAILURE; | |
3459 } | |
3460 | |
3461 PRStatus | |
3462 _MD_UnlockFile(PRInt32 f) | |
3463 { | |
3464 PRInt32 rv; | |
3465 rv = flock(f, LOCK_UN); | |
3466 if (rv == 0) | |
3467 return PR_SUCCESS; | |
3468 _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO()); | |
3469 return PR_FAILURE; | |
3470 } | |
3471 #else | |
3472 | |
3473 PRStatus | |
3474 _MD_LockFile(PRInt32 f) | |
3475 { | |
3476 PRInt32 rv; | |
3477 rv = lockf(f, F_LOCK, 0); | |
3478 if (rv == 0) | |
3479 return PR_SUCCESS; | |
3480 _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO()); | |
3481 return PR_FAILURE; | |
3482 } | |
3483 | |
3484 PRStatus | |
3485 _MD_TLockFile(PRInt32 f) | |
3486 { | |
3487 PRInt32 rv; | |
3488 rv = lockf(f, F_TLOCK, 0); | |
3489 if (rv == 0) | |
3490 return PR_SUCCESS; | |
3491 _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO()); | |
3492 return PR_FAILURE; | |
3493 } | |
3494 | |
3495 PRStatus | |
3496 _MD_UnlockFile(PRInt32 f) | |
3497 { | |
3498 PRInt32 rv; | |
3499 rv = lockf(f, F_ULOCK, 0); | |
3500 if (rv == 0) | |
3501 return PR_SUCCESS; | |
3502 _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO()); | |
3503 return PR_FAILURE; | |
3504 } | |
3505 #endif | |
3506 | |
3507 PRStatus _MD_gethostname(char *name, PRUint32 namelen) | |
3508 { | |
3509 PRIntn rv; | |
3510 | |
3511 rv = gethostname(name, namelen); | |
3512 if (0 == rv) { | |
3513 return PR_SUCCESS; | |
3514 } | |
3515 _PR_MD_MAP_GETHOSTNAME_ERROR(_MD_ERRNO()); | |
3516 return PR_FAILURE; | |
3517 } | |
3518 | |
3519 PRStatus _MD_getsysinfo(PRSysInfo cmd, char *name, PRUint32 namelen) | |
3520 { | |
3521 struct utsname info; | |
3522 | |
3523 PR_ASSERT((cmd == PR_SI_SYSNAME) || (cmd == PR_SI_RELEASE)); | |
3524 | |
3525 if (uname(&info) == -1) { | |
3526 _PR_MD_MAP_DEFAULT_ERROR(errno); | |
3527 return PR_FAILURE; | |
3528 } | |
3529 if (PR_SI_SYSNAME == cmd) | |
3530 (void)PR_snprintf(name, namelen, info.sysname); | |
3531 else if (PR_SI_RELEASE == cmd) | |
3532 (void)PR_snprintf(name, namelen, info.release); | |
3533 else | |
3534 return PR_FAILURE; | |
3535 return PR_SUCCESS; | |
3536 } | |
3537 | |
3538 /* | |
3539 ******************************************************************* | |
3540 * | |
3541 * Memory-mapped files | |
3542 * | |
3543 ******************************************************************* | |
3544 */ | |
3545 | |
/*
 * Prepare a PRFileMap for mmap(): grow the underlying file to 'size'
 * bytes if needed, then translate the NSPR protection mode into the
 * mmap prot/flags cached in fmap->md for later use by _MD_MemMap().
 */
PRStatus _MD_CreateFileMap(PRFileMap *fmap, PRInt64 size)
{
    PRFileInfo info;
    PRUint32 sz;

    /* NOTE(review): truncates 'size' to 32 bits -- presumably callers
     * restrict maps to < 4GB on this path; confirm before relying on it. */
    LL_L2UI(sz, size);
    if (sz) {
        if (PR_GetOpenFileInfo(fmap->fd, &info) == PR_FAILURE) {
            return PR_FAILURE;
        }
        if (sz > info.size) {
            /*
             * Need to extend the file
             */
            if (fmap->prot != PR_PROT_READWRITE) {
                PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, 0);
                return PR_FAILURE;
            }
            /* Extend by writing a single NUL byte at offset sz-1. */
            if (PR_Seek(fmap->fd, sz - 1, PR_SEEK_SET) == -1) {
                return PR_FAILURE;
            }
            if (PR_Write(fmap->fd, "", 1) != 1) {
                return PR_FAILURE;
            }
        }
    }
    /* Map the NSPR protection mode onto mmap prot/flags. */
    if (fmap->prot == PR_PROT_READONLY) {
        fmap->md.prot = PROT_READ;
#ifdef OSF1V4_MAP_PRIVATE_BUG
        /*
         * Use MAP_SHARED to work around a bug in OSF1 V4.0D
         * (QAR 70220 in the OSF_QAR database) that results in
         * corrupted data in the memory-mapped region.  This
         * bug is fixed in V5.0.
         */
        fmap->md.flags = MAP_SHARED;
#else
        fmap->md.flags = MAP_PRIVATE;
#endif
    } else if (fmap->prot == PR_PROT_READWRITE) {
        fmap->md.prot = PROT_READ | PROT_WRITE;
        fmap->md.flags = MAP_SHARED;
    } else {
        PR_ASSERT(fmap->prot == PR_PROT_WRITECOPY);
        fmap->md.prot = PROT_READ | PROT_WRITE;
        fmap->md.flags = MAP_PRIVATE;
    }
    return PR_SUCCESS;
}
3595 | |
3596 void * _MD_MemMap( | |
3597 PRFileMap *fmap, | |
3598 PRInt64 offset, | |
3599 PRUint32 len) | |
3600 { | |
3601 PRInt32 off; | |
3602 void *addr; | |
3603 | |
3604 LL_L2I(off, offset); | |
3605 if ((addr = mmap(0, len, fmap->md.prot, fmap->md.flags, | |
3606 fmap->fd->secret->md.osfd, off)) == (void *) -1) { | |
3607 _PR_MD_MAP_MMAP_ERROR(_MD_ERRNO()); | |
3608 addr = NULL; | |
3609 } | |
3610 return addr; | |
3611 } | |
3612 | |
3613 PRStatus _MD_MemUnmap(void *addr, PRUint32 len) | |
3614 { | |
3615 if (munmap(addr, len) == 0) { | |
3616 return PR_SUCCESS; | |
3617 } | |
3618 _PR_MD_MAP_DEFAULT_ERROR(errno); | |
3619 return PR_FAILURE; | |
3620 } | |
3621 | |
3622 PRStatus _MD_CloseFileMap(PRFileMap *fmap) | |
3623 { | |
3624 if ( PR_TRUE == fmap->md.isAnonFM ) { | |
3625 PRStatus rc = PR_Close( fmap->fd ); | |
3626 if ( PR_FAILURE == rc ) { | |
3627 PR_LOG( _pr_io_lm, PR_LOG_DEBUG, | |
3628 ("_MD_CloseFileMap(): error closing anonymnous file map osfd")); | |
3629 return PR_FAILURE; | |
3630 } | |
3631 } | |
3632 PR_DELETE(fmap); | |
3633 return PR_SUCCESS; | |
3634 } | |
3635 | |
3636 PRStatus _MD_SyncMemMap( | |
3637 PRFileDesc *fd, | |
3638 void *addr, | |
3639 PRUint32 len) | |
3640 { | |
3641 /* msync(..., MS_SYNC) alone is sufficient to flush modified data to disk | |
3642 * synchronously. It is not necessary to call fsync. */ | |
3643 if (msync(addr, len, MS_SYNC) == 0) { | |
3644 return PR_SUCCESS; | |
3645 } | |
3646 _PR_MD_MAP_DEFAULT_ERROR(errno); | |
3647 return PR_FAILURE; | |
3648 } | |
3649 | |
3650 #if defined(_PR_NEED_FAKE_POLL) | |
3651 | |
3652 /* | |
3653 * Some platforms don't have poll(). For easier porting of code | |
3654 * that calls poll(), we emulate poll() using select(). | |
3655 */ | |
3656 | |
/*
 * Emulate poll() on top of select().  Semantics differences from a
 * native poll(): POLLERR/POLLHUP are never reported, and descriptors
 * >= FD_SETSIZE cannot be represented by fd_set -- NOTE(review):
 * FD_SET on such a descriptor would overflow; confirm callers bound
 * their fds, as native NSPR did.
 */
int poll(struct pollfd *filedes, unsigned long nfds, int timeout)
{
    int i;
    int rv;
    int maxfd;
    fd_set rd, wr, ex;
    struct timeval tv, *tvp;

    /* Only -1 (infinite) or a non-negative millisecond timeout is legal. */
    if (timeout < 0 && timeout != -1) {
        errno = EINVAL;
        return -1;
    }

    if (timeout == -1) {
        tvp = NULL;  /* NULL timeval => block indefinitely */
    } else {
        tv.tv_sec = timeout / 1000;
        tv.tv_usec = (timeout % 1000) * 1000;
        tvp = &tv;
    }

    maxfd = -1;
    FD_ZERO(&rd);
    FD_ZERO(&wr);
    FD_ZERO(&ex);

    for (i = 0; i < nfds; i++) {
        int osfd = filedes[i].fd;
        int events = filedes[i].events;
        PRBool fdHasEvent = PR_FALSE;

        if (osfd < 0) {
            continue;  /* Skip this osfd. */
        }

        /*
         * Map the poll events to the select fd_sets.
         *     POLLIN, POLLRDNORM  ===> readable
         *     POLLOUT, POLLWRNORM ===> writable
         *     POLLPRI, POLLRDBAND ===> exception
         *     POLLNORM, POLLWRBAND (and POLLMSG on some platforms)
         *     are ignored.
         *
         * The output events POLLERR and POLLHUP are never turned on.
         * POLLNVAL may be turned on.
         */

        if (events & (POLLIN | POLLRDNORM)) {
            FD_SET(osfd, &rd);
            fdHasEvent = PR_TRUE;
        }
        if (events & (POLLOUT | POLLWRNORM)) {
            FD_SET(osfd, &wr);
            fdHasEvent = PR_TRUE;
        }
        if (events & (POLLPRI | POLLRDBAND)) {
            FD_SET(osfd, &ex);
            fdHasEvent = PR_TRUE;
        }
        /* Only fds with at least one mapped event widen select's range. */
        if (fdHasEvent && osfd > maxfd) {
            maxfd = osfd;
        }
    }

    rv = select(maxfd + 1, &rd, &wr, &ex, tvp);

    /* Compute poll results */
    if (rv > 0) {
        /* Recount: select's count is per-fd_set-bit, poll's is per-fd. */
        rv = 0;
        for (i = 0; i < nfds; i++) {
            PRBool fdHasEvent = PR_FALSE;

            filedes[i].revents = 0;
            if (filedes[i].fd < 0) {
                continue;
            }
            if (FD_ISSET(filedes[i].fd, &rd)) {
                if (filedes[i].events & POLLIN) {
                    filedes[i].revents |= POLLIN;
                }
                if (filedes[i].events & POLLRDNORM) {
                    filedes[i].revents |= POLLRDNORM;
                }
                fdHasEvent = PR_TRUE;
            }
            if (FD_ISSET(filedes[i].fd, &wr)) {
                if (filedes[i].events & POLLOUT) {
                    filedes[i].revents |= POLLOUT;
                }
                if (filedes[i].events & POLLWRNORM) {
                    filedes[i].revents |= POLLWRNORM;
                }
                fdHasEvent = PR_TRUE;
            }
            if (FD_ISSET(filedes[i].fd, &ex)) {
                if (filedes[i].events & POLLPRI) {
                    filedes[i].revents |= POLLPRI;
                }
                if (filedes[i].events & POLLRDBAND) {
                    filedes[i].revents |= POLLRDBAND;
                }
                fdHasEvent = PR_TRUE;
            }
            if (fdHasEvent) {
                rv++;
            }
        }
        PR_ASSERT(rv > 0);
    } else if (rv == -1 && errno == EBADF) {
        /*
         * select() rejects the whole call on a bad fd; poll() instead
         * marks the offending entries POLLNVAL.  Probe each fd with
         * fcntl(F_GETFL) to find the bad ones.
         */
        rv = 0;
        for (i = 0; i < nfds; i++) {
            filedes[i].revents = 0;
            if (filedes[i].fd < 0) {
                continue;
            }
            if (fcntl(filedes[i].fd, F_GETFL, 0) == -1) {
                filedes[i].revents = POLLNVAL;
                rv++;
            }
        }
        PR_ASSERT(rv > 0);
    }
    PR_ASSERT(-1 != timeout || rv != 0);

    return rv;
}
3783 #endif /* _PR_NEED_FAKE_POLL */ |