comparison nspr/pr/src/io/prfdcach.c @ 0:1e5118fa0cb1

This is NSS with a CMake buildsystem. To compile a static NSS library for Windows, we used the Chromium-NSS fork and added a CMake buildsystem so it can be compiled statically for Windows. See README.chromium for the Chromium changes and README.trustbridge for our modifications.
author Andre Heinecke <andre.heinecke@intevation.de>
date Mon, 28 Jul 2014 10:47:06 +0200
parents
children
comparison
equal deleted inserted replaced
-1:000000000000 0:1e5118fa0cb1
1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* This Source Code Form is subject to the terms of the Mozilla Public
3 * License, v. 2.0. If a copy of the MPL was not distributed with this
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
5
6 #include "primpl.h"
7
8 #include <string.h>
9
10 /*****************************************************************************/
11 /*****************************************************************************/
12 /************************** File descriptor caching **************************/
13 /*****************************************************************************/
14 /*****************************************************************************/
15
16 /*
17 ** This code is built into debuggable versions of NSPR to assist in
18 ** finding misused file descriptors. Since file descriptors (PRFileDesc)
19 ** are identified by a pointer to their structure, they can be the
20 ** target of dangling references. Furthermore, NSPR caches and tries
21 ** to aggressively reuse file descriptors, leading to more ambiguity.
22 ** The following code will allow a debugging client to set environment
23 ** variables and control the number of file descriptors that will be
24 ** preserved before they are recycled. The environment variables are
25 ** NSPR_FD_CACHE_SIZE_LOW and NSPR_FD_CACHE_SIZE_HIGH. The former sets
26 ** the number of descriptors NSPR will allocate before beginning to
27 ** recycle. The latter is the maximum number permitted in the cache
28 ** (exclusive of those in use) at a time.
29 */
/*
** Process-wide bookkeeping for the fd cache. A single static instance
** (_pr_fd_cache below) is shared by all threads.
*/
typedef struct _PR_Fd_Cache
{
    PRLock *ml;                    /* guards head/tail/count mutations */
    PRIntn count;                  /* number of fds currently on the head/tail list */
    PRStack *stack;                /* lock-free store used when limit_high == 0 */
    PRFileDesc *head, *tail;       /* FIFO list of cached fds, linked via fd->higher */
    PRIntn limit_low, limit_high;  /* cache watermarks (env vars or PR_SetFDCacheSize) */
} _PR_Fd_Cache;
38
/* The single, process-wide fd cache instance. */
static _PR_Fd_Cache _pr_fd_cache;
/*
** Poor man's offsetof(PRFileDesc, higher): the address of the 'higher'
** field within a PRFileDesc based at NULL is numerically the field's
** byte offset. It is used to recover the enclosing PRFileDesc from a
** PRStackElem* that points at fd->higher.
** NOTE(review): dereferencing a null-based pointer like this is
** formally undefined behavior in C; offsetof() from <stddef.h> would be
** the portable spelling. Left as-is because all use sites depend on
** this exact pointer value.
*/
static PRFileDesc **stack2fd = &(((PRFileDesc*)NULL)->higher);
41
42
/*
** Get a FileDescriptor from the cache if one exists. If not allocate
** a new one from the heap.
**
** Returns NULL only when heap allocation fails. On success the fd is
** handed back with dtor/lower/higher cleared, identity set to
** PR_NSPR_IO_LAYER, and its PRFilePrivate ('secret') zeroed.
*/
PRFileDesc *_PR_Getfd(void)
{
    PRFileDesc *fd;
    /*
    ** $$$
    ** This may look a little wasteful. We'll see. Right now I want to
    ** be able to toggle between caching and not at runtime to measure
    ** the differences. If it isn't too annoying, I'll leave it in.
    ** $$$$
    **
    ** The test is against _pr_fd_cache.limit_high. If that's zero,
    ** we're not doing the extended cache but going for performance.
    */
    if (0 == _pr_fd_cache.limit_high)
    {
        /* Fast path: pop a recycled fd off the lock-free stack. */
        PRStackElem *pop;
        PR_ASSERT(NULL != _pr_fd_cache.stack);
        pop = PR_StackPop(_pr_fd_cache.stack);
        if (NULL == pop) goto allocate;
        /* Recover the enclosing PRFileDesc from the embedded stack
        ** link; stack2fd encodes the byte offset of 'higher'. */
        fd = (PRFileDesc*)((PRPtrdiff)pop - (PRPtrdiff)stack2fd);
    }
    else
    {
        do
        {
            /* Unlocked peeks: a stale read here only costs an extra
            ** heap allocation or one more loop pass, never corruption. */
            if (NULL == _pr_fd_cache.head) goto allocate;  /* nothing there */
            if (_pr_fd_cache.count < _pr_fd_cache.limit_low) goto allocate;

            /* we "should" be able to extract an fd from the cache */
            PR_Lock(_pr_fd_cache.ml);  /* need the lock to do this safely */
            fd = _pr_fd_cache.head;  /* protected extraction */
            if (NULL == fd)  /* unexpected, but not fatal */
            {
                /* Another thread drained the cache between the peek
                ** and the lock; the list must be fully empty. */
                PR_ASSERT(0 == _pr_fd_cache.count);
                PR_ASSERT(NULL == _pr_fd_cache.tail);
            }
            else
            {
                /* Unlink the head of the FIFO list. */
                _pr_fd_cache.count -= 1;
                _pr_fd_cache.head = fd->higher;
                if (NULL == _pr_fd_cache.head)
                {
                    PR_ASSERT(0 == _pr_fd_cache.count);
                    _pr_fd_cache.tail = NULL;
                }
                /* A cached fd must still carry the "freed" markers
                ** that _PR_Putfd stamped on it. */
                PR_ASSERT(&_pr_faulty_methods == fd->methods);
                PR_ASSERT(PR_INVALID_IO_LAYER == fd->identity);
                PR_ASSERT(_PR_FILEDESC_FREED == fd->secret->state);
            }
            PR_Unlock(_pr_fd_cache.ml);

        } while (NULL == fd);  /* then go around and allocate a new one */
    }

finished:
    /* Common re-initialization for both cached and freshly heap-
    ** allocated fds. */
    fd->dtor = NULL;
    fd->lower = fd->higher = NULL;
    fd->identity = PR_NSPR_IO_LAYER;
    memset(fd->secret, 0, sizeof(PRFilePrivate));
    return fd;

allocate:
    fd = PR_NEW(PRFileDesc);
    if (NULL != fd)
    {
        fd->secret = PR_NEW(PRFilePrivate);
        /* Don't leak the fd when the private part can't be allocated. */
        if (NULL == fd->secret) PR_DELETE(fd);
    }
    if (NULL != fd) goto finished;
    else return NULL;

}  /* _PR_Getfd */
119
/*
** Return a file descriptor to the cache unless there are too many in
** there already. If put in cache, clear the fields first.
**
** The fd is stamped with "freed" markers (faulty methods table,
** PR_INVALID_IO_LAYER identity, _PR_FILEDESC_FREED state) so that
** dangling references to it trip assertions in _PR_Getfd later.
*/
void _PR_Putfd(PRFileDesc *fd)
{
    PR_ASSERT(PR_NSPR_IO_LAYER == fd->identity);
    fd->methods = &_pr_faulty_methods;
    fd->identity = PR_INVALID_IO_LAYER;
    fd->secret->state = _PR_FILEDESC_FREED;

    if (0 == _pr_fd_cache.limit_high)
    {
        /* Caching off: push onto the lock-free stack, using the fd's
        ** 'higher' field as the embedded stack link (see stack2fd). */
        PR_StackPush(_pr_fd_cache.stack, (PRStackElem*)(&fd->higher));
    }
    else
    {
        /* Unlocked read of count: a benign race may let the cache
        ** briefly exceed limit_high by a few entries. */
        if (_pr_fd_cache.count > _pr_fd_cache.limit_high)
        {
            /* Cache is full: really release the fd. */
            PR_Free(fd->secret);
            PR_Free(fd);
        }
        else
        {
            /* Append the fd at the tail of the FIFO list. */
            PR_Lock(_pr_fd_cache.ml);
            if (NULL == _pr_fd_cache.tail)
            {
                /* Empty list: fd becomes both head and tail. */
                PR_ASSERT(0 == _pr_fd_cache.count);
                PR_ASSERT(NULL == _pr_fd_cache.head);
                _pr_fd_cache.head = _pr_fd_cache.tail = fd;
            }
            else
            {
                PR_ASSERT(NULL == _pr_fd_cache.tail->higher);
                _pr_fd_cache.tail->higher = fd;
                _pr_fd_cache.tail = fd;  /* new value */
            }
            fd->higher = NULL;  /* always so */
            _pr_fd_cache.count += 1;  /* count the new entry */
            PR_Unlock(_pr_fd_cache.ml);
        }
    }
}  /* _PR_Putfd */
163
164 PR_IMPLEMENT(PRStatus) PR_SetFDCacheSize(PRIntn low, PRIntn high)
165 {
166 /*
167 ** This can be called at any time, may adjust the cache sizes,
168 ** turn the caches off, or turn them on. It is not dependent
169 ** on the compilation setting of DEBUG.
170 */
171 if (!_pr_initialized) _PR_ImplicitInitialization();
172
173 if (low > high) low = high; /* sanity check the params */
174
175 PR_Lock(_pr_fd_cache.ml);
176 if (0 == high) /* shutting down or staying down */
177 {
178 if (0 != _pr_fd_cache.limit_high) /* shutting down */
179 {
180 _pr_fd_cache.limit_high = 0; /* stop use */
181 /*
182 ** Hold the lock throughout - nobody's going to want it
183 ** other than another caller to this routine. Just don't
184 ** let that happen.
185 **
186 ** Put all the cached fds onto the new cache.
187 */
188 while (NULL != _pr_fd_cache.head)
189 {
190 PRFileDesc *fd = _pr_fd_cache.head;
191 _pr_fd_cache.head = fd->higher;
192 PR_StackPush(_pr_fd_cache.stack, (PRStackElem*)(&fd->higher));
193 }
194 _pr_fd_cache.limit_low = 0;
195 _pr_fd_cache.tail = NULL;
196 _pr_fd_cache.count = 0;
197 }
198 }
199 else /* starting up or just adjusting parameters */
200 {
201 PRBool was_using_stack = (0 == _pr_fd_cache.limit_high);
202 _pr_fd_cache.limit_low = low;
203 _pr_fd_cache.limit_high = high;
204 if (was_using_stack) /* was using stack - feed into cache */
205 {
206 PRStackElem *pop;
207 while (NULL != (pop = PR_StackPop(_pr_fd_cache.stack)))
208 {
209 PRFileDesc *fd = (PRFileDesc*)
210 ((PRPtrdiff)pop - (PRPtrdiff)stack2fd);
211 if (NULL == _pr_fd_cache.tail) _pr_fd_cache.tail = fd;
212 fd->higher = _pr_fd_cache.head;
213 _pr_fd_cache.head = fd;
214 _pr_fd_cache.count += 1;
215 }
216 }
217 }
218 PR_Unlock(_pr_fd_cache.ml);
219 return PR_SUCCESS;
220 } /* PR_SetFDCacheSize */
221
222 void _PR_InitFdCache(void)
223 {
224 /*
225 ** The fd caching is enabled by default for DEBUG builds,
226 ** disabled by default for OPT builds. That default can
227 ** be overridden at runtime using environment variables
228 ** or a super-wiz-bang API.
229 */
230 const char *low = PR_GetEnv("NSPR_FD_CACHE_SIZE_LOW");
231 const char *high = PR_GetEnv("NSPR_FD_CACHE_SIZE_HIGH");
232
233 /*
234 ** _low is allowed to be zero, _high is not.
235 ** If _high is zero, we're not doing the caching.
236 */
237
238 _pr_fd_cache.limit_low = 0;
239 #if defined(DEBUG)
240 _pr_fd_cache.limit_high = FD_SETSIZE;
241 #else
242 _pr_fd_cache.limit_high = 0;
243 #endif /* defined(DEBUG) */
244
245 if (NULL != low) _pr_fd_cache.limit_low = atoi(low);
246 if (NULL != high) _pr_fd_cache.limit_high = atoi(high);
247
248 if (_pr_fd_cache.limit_low < 0)
249 _pr_fd_cache.limit_low = 0;
250 if (_pr_fd_cache.limit_low > FD_SETSIZE)
251 _pr_fd_cache.limit_low = FD_SETSIZE;
252
253 if (_pr_fd_cache.limit_high > FD_SETSIZE)
254 _pr_fd_cache.limit_high = FD_SETSIZE;
255
256 if (_pr_fd_cache.limit_high < _pr_fd_cache.limit_low)
257 _pr_fd_cache.limit_high = _pr_fd_cache.limit_low;
258
259 _pr_fd_cache.ml = PR_NewLock();
260 PR_ASSERT(NULL != _pr_fd_cache.ml);
261 _pr_fd_cache.stack = PR_CreateStack("FD");
262 PR_ASSERT(NULL != _pr_fd_cache.stack);
263
264 } /* _PR_InitFdCache */
265
266 void _PR_CleanupFdCache(void)
267 {
268 PRFileDesc *fd, *next;
269 PRStackElem *pop;
270
271 for (fd = _pr_fd_cache.head; fd != NULL; fd = next)
272 {
273 next = fd->higher;
274 PR_DELETE(fd->secret);
275 PR_DELETE(fd);
276 }
277 _pr_fd_cache.head = NULL;
278 _pr_fd_cache.tail = NULL;
279 _pr_fd_cache.count = 0;
280 PR_DestroyLock(_pr_fd_cache.ml);
281 _pr_fd_cache.ml = NULL;
282 while ((pop = PR_StackPop(_pr_fd_cache.stack)) != NULL)
283 {
284 fd = (PRFileDesc*)((PRPtrdiff)pop - (PRPtrdiff)stack2fd);
285 PR_DELETE(fd->secret);
286 PR_DELETE(fd);
287 }
288 PR_DestroyStack(_pr_fd_cache.stack);
289 _pr_fd_cache.stack = NULL;
290 } /* _PR_CleanupFdCache */
291
292 /* prfdcach.c */
This site is hosted by Intevation GmbH (Datenschutzerklärung und Impressum | Privacy Policy and Imprint)