Some cleanup after 3s4i code audit
[mmondor.git] / mmsoftware / mmlib / mmpool.c
CommitLineData
546359b9 1/* $Id: mmpool.c,v 1.27 2004/08/07 23:08:56 mmondor Exp $ */
7a56f31f
MM
2
3/*
d4aaa170 4 * Copyright (C) 2001-2004, Matthew Mondor
7a56f31f
MM
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
d4aaa170 17 * This product includes software developed by Matthew Mondor.
7a56f31f
MM
18 * 4. The name of Matthew Mondor may not be used to endorse or promote
19 * products derived from this software without specific prior written
20 * permission.
d4aaa170
MM
21 * 5. Redistribution of source code may not be released under the terms of
22 * any GNU Public License derivate.
7a56f31f
MM
23 *
24 * THIS SOFTWARE IS PROVIDED BY MATTHEW MONDOR ``AS IS'' AND ANY EXPRESS OR
25 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
26 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
27 * IN NO EVENT SHALL MATTHEW MONDOR BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
30 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
31 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
33 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 */
35
36
37
7a56f31f
MM
38#include <sys/types.h>
39#include <stdlib.h>
ed396790 40#include <syslog.h>
7a56f31f
MM
41
42#include <mmtypes.h>
43#include <mmstring.h>
44#include <mmlist.h>
45#include <mmpool.h>
2556ec01 46#include <mmlog.h>
7a56f31f
MM
47
48
49
d4aaa170 50MMCOPYRIGHT("@(#) Copyright (c) 2001-2004\n\
7a56f31f 51\tMatthew Mondor. All rights reserved.\n");
546359b9 52MMRCSID("$Id: mmpool.c,v 1.27 2004/08/07 23:08:56 mmondor Exp $");
7a56f31f
MM
53
54
55
343b3a6c
MM
56/* DEFINITIONS */
57
109627ac 58#define BPAGE_SIZE ((size_t)OALIGN_CEIL(sizeof(bpage_t), long))
343b3a6c 59
ed396790
MM
60/* Define MMPOOLDEBUG and DEBUG for extremely verbose diagnostics */
61#ifdef MMPOOLDEBUG
cea243ac 62#define POOL_DEBUG DEBUG_PRINTF
ed396790 63#else
a95888ea 64#define POOL_DEBUG(s...) ;
ed396790
MM
65#endif
66
343b3a6c
MM
67
68
69/* STATIC FUNCTION PROTOTYPES */
70
05bafc6f
MM
71static bpage_t * pool_page_create(const char *, pool_t *);
72static bpage_t * pool_page_destroy(bpage_t *);
343b3a6c
MM
73
74
75
76/* STATIC FUNCTIONS */
77
/* Creates a new page of objects, and calls the constructor function for each
 * object of the new page if needed. Returns NULL on failure, or the new
 * bpage_t pointer on success.
 * <func> is the calling function's name, used only to tag syslog()
 * diagnostics.
 */
static bpage_t *pool_page_create(const char *func, pool_t *pool)
{
	register size_t nodesize = pool->nodesize;
	register u_int8_t *ptr, *toptr;
	register bpage_t *page;
	register list_t *list;

	/* A page is one allocation holding the bpage_t header immediately
	 * followed by the object slots; pool->pagesize was sized accordingly
	 * by pool_init().
	 */
	if ((page = pool->malloc(pool->pagesize)) == NULL) {
		syslog(LOG_NOTICE,
		    "%s - Out of memory allocating page for pool_t (%s)",
		    func, pool->label);
		return NULL;
	}

	/* Initialize bpage_t */
	page->magic = MAGIC_PAGE;
	page->pool = pool;
	DLIST_INIT(&page->objects);

	/* Create all objects of that bpage_t, inserting them into its list_t.
	 * If any object creation fails (its optional constructor function can
	 * fail), destroy the page and return NULL.
	 * Slots are carved every <nodesize> bytes starting BPAGE_SIZE past the
	 * page header; the `ptr + nodesize < toptr' bound keeps each slot
	 * entirely inside the page (pool_init() allocates one extra slot of
	 * slack so that nodesperpage objects fit — TODO confirm against
	 * pool_init()'s pagesize computation).
	 */
	if (pool->create != NULL) {
		for (ptr = toptr = (u_int8_t *)page, ptr += BPAGE_SIZE,
		    toptr += pool->pagesize, list = &page->objects;
		    ptr + nodesize < toptr; ptr += nodesize) {
			((pnode_t *)ptr)->magic = 0;	/* not yet allocated */
			((pnode_t *)ptr)->page = page;	/* back-link used by pool_free() */
			if (!pool->create((pnode_t *)ptr)) {
				/* Constructor failed; pool_page_destroy() only runs
				 * the destructor on objects already appended to the
				 * list, which excludes this failed one.
				 */
				syslog(LOG_NOTICE, "%s - Page object constructor function "
				    "returned error for pool_t (%s)",
				    func, pool->label);
				return pool_page_destroy(page);
			}
			DLIST_APPEND(list, (node_t *)ptr);
		}
	} else {
		/* No constructor; simply thread every slot onto the free list */
		for (ptr = toptr = (u_int8_t *)page, ptr += BPAGE_SIZE,
		    toptr += pool->pagesize, list = &page->objects;
		    ptr + nodesize < toptr; ptr += nodesize) {
			((pnode_t *)ptr)->magic = 0;
			((pnode_t *)ptr)->page = page;
			DLIST_APPEND(list, (node_t *)ptr);
		}
	}

	return page;
}
131
132/* Destroys a page previously created using pool_page_create(), calling the
133 * destructor function for each object of the page if necessary. Returns NULL.
134 */
135static bpage_t *pool_page_destroy(bpage_t *page)
136{
546359b9
MM
137 register pnode_t *pnode;
138 register void (*destroy)(pnode_t *);
343b3a6c
MM
139
140 if ((destroy = (page->pool->destroy)) != NULL) {
141 /* We need to destroy all objects */
142 DLIST_FOREACH(&page->objects, pnode)
143 destroy(pnode);
144 }
145
146 page->pool->free(page);
147
148 return NULL;
149}
150
151
152
153/* PUBLIC FUNCTIONS */
7a56f31f 154
ed581bf0
MM
155/* Initializes a memory pool for efficient management of fixed sized nodes.
156 * returns TRUE on success or FALSE on failure.
157 */
ed396790 158bool pool_init(pool_t *pool, const char *label, void *(*mallocfunc)(size_t),
343b3a6c
MM
159 void (*freefunc)(void *), bool (*create)(pnode_t *),
160 void (*destroy)(pnode_t *), size_t nodesize, u_int32_t nodesperpage,
7a56f31f
MM
161 u_int32_t minpages, u_int32_t maxpages)
162{
163 bool ok = FALSE;
164
e89b4e26
MM
165 if (DEBUG_FALSE(POOL_VALID(pool))) {
166 DEBUG_PRINTF("pool_init", "Pool already initialized (%p = %s)",
fb5bbc21
MM
167 pool, pool->label);
168 return FALSE;
169 }
170
e89b4e26
MM
171 if (DEBUG_TRUE(pool != NULL && mallocfunc != NULL && freefunc != NULL &&
172 ((create != NULL && destroy != NULL) ||
173 (void *)create == (void *)destroy) &&
174 nodesize != 0 && nodesperpage > 0)) {
7a56f31f 175 register size_t ns = (size_t)OALIGN_CEIL(nodesize, long);
343b3a6c 176 register size_t ps = (BPAGE_SIZE + ((nodesperpage + 1) * ns));
7a56f31f
MM
177
178 pool->magic = 0;
ed396790 179 pool->label = label;
7a56f31f
MM
180 pool->malloc = mallocfunc;
181 pool->free = freefunc;
343b3a6c
MM
182 pool->create = create;
183 pool->destroy = destroy;
7a56f31f
MM
184 pool->nodesize = ns;
185 pool->minpages = minpages;
186 pool->maxpages = maxpages;
187 pool->nodesperpage = nodesperpage;
188 pool->pagesize = ps;
189 pool->avgtotal = pool->avgcnt = minpages;
d2558e27
MM
190 DLIST_INIT(&pool->pages);
191 DLIST_INIT(&pool->fpages);
343b3a6c
MM
192 DLIST_INIT(&pool->epages);
193
194 /* Allocate minimum number of pages, if needed. We insert them into
195 * the empty pages list, but minpages will be honored as such to
196 * never free pages below it.
197 */
7a56f31f
MM
198 for (; minpages > 0; minpages--) {
199 register bpage_t *p;
200
343b3a6c 201 if ((p = pool_page_create("pool_init", pool)) == NULL)
7a56f31f 202 break;
343b3a6c 203 DLIST_APPEND(&pool->epages, (node_t *)p);
7a56f31f 204 }
343b3a6c
MM
205
206 /* Validate this pool */
7a56f31f 207 pool->magic = MAGIC_POOL;
343b3a6c
MM
208
209 /* Have we failed to allocate any needed pages? If so, destroy pool
210 * and return FALSE.
211 */
7a56f31f
MM
212 if (minpages == 0)
213 ok = TRUE;
343b3a6c
MM
214 else if (minpages < pool->minpages)
215 (void) pool_destroy(pool);
216
7a56f31f 217 } else
e89b4e26 218 DEBUG_PRINTF("pool_init", "Invalid parameters");
7a56f31f 219
ed396790
MM
220 if (ok)
221 POOL_DEBUG("pool_init", "(%p = %s) initialized", pool, pool->label);
222
7a56f31f
MM
223 return ok;
224}
225
226
ed581bf0
MM
227/* Destroys a pool which previously has been created by pool_init(), and frees
228 * any resources it uses (all the nodes it created become invalid).
229 * Returns TRUE on success, or FALSE if the pool pointer is invalid.
230 */
7a56f31f
MM
231bool pool_destroy(pool_t *pool)
232{
233 bool ok = FALSE;
234
343b3a6c 235 /* Make sure pool_t is valid */
e89b4e26 236 if (DEBUG_TRUE(POOL_VALID(pool))) {
343b3a6c 237 register bpage_t *p, *t;
7a56f31f 238
ed396790
MM
239 POOL_DEBUG("pool_destroy", "(%p = %s) destroying", pool, pool->label);
240
343b3a6c 241 /* Destroy all pages of all lists */
d2558e27
MM
242 for (p = DLIST_TOP(&pool->pages); p != NULL; p = t) {
243 t = DLIST_NEXT(p);
ed396790 244 (void) pool_page_destroy(p);
7a56f31f 245 }
d2558e27
MM
246 for (p = DLIST_TOP(&pool->fpages); p != NULL; p = t) {
247 t = DLIST_NEXT(p);
ed396790 248 (void) pool_page_destroy(p);
343b3a6c
MM
249 }
250 for (p = DLIST_TOP(&pool->epages); p != NULL; p = t) {
251 t = DLIST_NEXT(p);
ed396790 252 (void) pool_page_destroy(p);
7a56f31f 253 }
343b3a6c
MM
254
255 /* Invalidate pool_t */
7a56f31f 256 pool->magic = 0;
343b3a6c 257
7a56f31f
MM
258 ok = TRUE;
259 } else
e89b4e26 260 DEBUG_PRINTF("pool_destroy", "Invalid pool_t pointer (%p)", pool);
7a56f31f
MM
261
262 return ok;
263}
264
265
ed581bf0
MM
/* Allows to very efficiently allocate a single node from the specified pool,
 * optionally initializing its memory to zeros. Returns the newly allocated
 * node pointer on success, or NULL on error.
 * Zeroing is refused when a constructor is registered, since it would wipe
 * the constructor's work.
 */
pnode_t *pool_alloc(pool_t *pool, bool zero)
{
	pnode_t *pnode = NULL;

	if (DEBUG_TRUE(POOL_VALID(pool))) {
		if (DEBUG_TRUE(!zero || pool->create == NULL)) {
			register bpage_t *page;

			/* Are there any partially used pages? */
			if ((page = DLIST_TOP(&pool->pages)) == NULL) {
				/* No, we thus attempt to move a page from our empty pages
				 * cache back into our usable pages list. The order of the
				 * usable page list becomes irrelevant at DLIST_SWAP() since
				 * it will be the only node.
				 */
				POOL_DEBUG("pool_alloc", "(%p = %s) No usable page",
				    pool, pool->label);
				if ((page = DLIST_TOP(&pool->epages)) == NULL) {
					/* No more free pages in our cache either. If maxpages
					 * is set and not yet reached, we need to create a new
					 * page. If we can't, return NULL for error.
					 * NOTE(review): the limit compares maxpages against
					 * the FULL pages list only, not the total page count;
					 * confirm this is the intended growth bound.
					 */
					POOL_DEBUG("pool_alloc",
					    "(%p = %s) No empty page", pool, pool->label);
					if (pool->maxpages == 0 ||
					    DLIST_NODES(&pool->fpages) < pool->maxpages) {
						POOL_DEBUG("pool_alloc",
						    "(%p = %s) Creating new usable page",
						    pool, pool->label);
						if ((page = pool_page_create("pool_alloc", pool))
						    == NULL)
							return NULL;
						DLIST_APPEND(&pool->pages, (node_t *)page);
					} else
						return NULL;
				} else {
					POOL_DEBUG("pool_alloc", "(%p = %s) swapping page "
					    "(%p) from empty to usable",
					    pool, pool->label, page);
					DLIST_SWAP(&pool->pages, &pool->epages, (node_t *)page,
					    FALSE);
				}
			} else {
				POOL_DEBUG("pool_alloc",
				    "(%p = %s) Allocating from usable page",
				    pool, pool->label);
			}

			/* <page> now points to a page we know we can at least get a free
			 * object node from. Obtain one and unlink it.
			 */
			pnode = DLIST_TOP(&page->objects);
			DLIST_UNLINK(&page->objects, (node_t *)pnode);

			/* Have we obtained the last available object from this page?
			 * If so, move the page to the full pages list. The order of the
			 * full pages list nodes is irrelevant, since we don't know which
			 * of those pages are more likely to be swapped to the usable
			 * pages list first, and we won't run through that list, except at
			 * pool_destroy().
			 */
			if (DLIST_NODES(&page->objects) == 0) {
				POOL_DEBUG("pool_alloc", "(%p = %s) swapping page (%p) "
				    "from usable to full list",
				    pool, pool->label, page);
				DLIST_SWAP(&pool->fpages, &pool->pages, (node_t *)page,
				    FALSE);
			}

			/* If requested, zero object. This is stupid if a constructor and
			 * destructor are used, but can be useful otherwise. The page
			 * back-link is saved and restored around the clear since it lives
			 * inside the node's memory.
			 */
			if (zero) {
				page = pnode->page;
				mm_memclr(pnode, pool->nodesize);
				pnode->page = page;
			}

			/* Mark this node as a valid allocated object */
			pnode->magic = MAGIC_PNODE;

			POOL_DEBUG("pool_alloc",
			    "(%p = %s) got object (%p) from page (%p)",
			    pool, pool->label, pnode, page);

		} else
			DEBUG_PRINTF("pool_alloc",
			    "Will not zero a constructed object of pool (%s)",
			    pool->label);
	} else
		DEBUG_PRINTF("pool_alloc", "Invalid pool_t pointer (%p)", pool);

	return pnode;
}
364
365
ed581bf0
MM
366/* Efficiently frees the specified node back to the pool it was allocated from.
367 * Returns NULL. Uses heuristics keeping statistics to determine when to
368 * actually shrink the memory blocks internally used by the pool, so that
369 * frequently growing and shrinking pools will remain large for scalability.
9316d0ce 370 * This also makes a big difference when constructors and destructors are used
05bafc6f 371 * and need to execute a significant amount of code.
ed581bf0 372 */
7a56f31f
MM
373pnode_t *pool_free(pnode_t *pnode)
374{
e89b4e26 375 if (DEBUG_TRUE(PNODE_VALID(pnode))) {
546359b9
MM
376 register bpage_t *page = pnode->page;
377 register pool_t *pool = page->pool;
378 register u_int32_t count;
7a56f31f 379
ed396790
MM
380 POOL_DEBUG("pool_free",
381 "(%p = %s) freeing object (%p) from page (%p)",
382 pool, pool->label, pnode, page);
383
343b3a6c 384 /* Invalidate object */
7a56f31f 385 pnode->magic = 0;
343b3a6c
MM
386
387 /* Record how many nodes there currently are in our page's object
388 * list, to be used later on
389 */
390 count = DLIST_NODES(&page->objects);
391
392 /* Add node back to it's page's object list. Insert it so that we
393 * favor reuse of recently used objects soon.
394 */
395 DLIST_INSERT(&page->objects, (node_t *)pnode);
396
397 /* Was this page full before we inserted our node? If so, page is in
398 * full pages list. Move it to the usable pages list. Insert it to
ed396790
MM
399 * favor reuse of recently used pages soon (this page will soon return
400 * back to the full pages list).
343b3a6c 401 */
ed396790
MM
402 if (count == 0) {
403 POOL_DEBUG("pool_free",
404 "(%p = %s) swapping page (%p) from full to usable list",
405 pool, pool->label, page);
343b3a6c 406 DLIST_SWAP(&pool->pages, &pool->fpages, (node_t *)page, TRUE);
ed396790 407 } else {
343b3a6c
MM
408 /* Did we cause our node insertion to totally free back the page?
409 * If so, the page is on the usable free list, but move it back to
410 * the empty pages list. Insert it to favor reuse of recently used
05bafc6f 411 * pages soon (this page will be the first to get used again when
ed396790 412 * the usable pages list needs pages).
343b3a6c 413 */
ed396790
MM
414 if (++count == pool->nodesperpage) {
415 POOL_DEBUG("pool_free", "(%p = %s) swapping page (%p) "
416 "from usable to empty list",
417 pool, pool->label, page);
343b3a6c 418 DLIST_SWAP(&pool->epages, &pool->pages, (node_t *)page, TRUE);
ed396790 419 }
343b3a6c
MM
420 }
421
7a56f31f
MM
422 if ((pool->minpages < pool->maxpages) ||
423 (pool->minpages == 0 && pool->maxpages == 0)) {
343b3a6c 424 register int exceeding;
7a56f31f 425
343b3a6c
MM
426 /* This pool_t is allowed to shrink. Maintain average pages usage
427 * to prevent destroying our pages in the empty pages list unless
428 * we should.
7a56f31f 429 */
343b3a6c
MM
430 count = DLIST_NODES(&pool->pages) + DLIST_NODES(&pool->fpages);
431 pool->avgtotal += count;
7a56f31f 432 pool->avgcnt++;
ed396790 433
05bafc6f 434 /* Using * 8 here means that pool_free() needs to at least be
e51f4fd7 435 * called to release as much objects as can fit into eight full
ed396790 436 * pages in order to execute the following block. But of course,
e760da44 437 * this does not mean that eight pages will recently have been
ed396790
MM
438 * freed, since allocations probably also occured meanwhile.
439 */
e51f4fd7 440 if (pool->avgcnt > pool->nodesperpage * 8) {
ed396790
MM
441 /* Do statistics suggest that we should shrink the pool?
442 * If so, free pages from our empty pages cache back to the
cf6bb6fb
MM
443 * system, destroying their objects if necessary. We'll make
444 * sure to at least leave a one page hysterisis for better
445 * performance.
ed396790
MM
446 */
447 if ((exceeding = (count + DLIST_NODES(&pool->epages) - 1) -
448 (pool->avgtotal / pool->avgcnt)) > 0) {
449 register list_t *epages = &pool->epages;
450
451 POOL_DEBUG("pool_alloc()",
452 "(%p = %s) Destroying %d exceeding pages",
453 pool, pool->label, exceeding);
454
455 /* Preferably free pages which haven't been used
456 * recently
457 */
458 for (; exceeding > 0 &&
459 (page = DLIST_BOTTOM(epages)) != NULL;
460 exceeding--) {
461 POOL_DEBUG("pool_free",
462 "(%p = %s) destroying page (%p)",
463 pool, pool->label, page);
464 DLIST_UNLINK(epages, (node_t *)page);
465 (void) pool_page_destroy(page);
466 }
467 }
468 /* Reset statistics */
7a56f31f 469 pool->avgcnt = 1;
343b3a6c 470 pool->avgtotal = count;
7a56f31f 471 }
7a56f31f
MM
472 }
473 } else
e89b4e26 474 DEBUG_PRINTF("pool_free", "Invalid pnode_t pointer (%p)", pnode);
7a56f31f
MM
475
476 return NULL;
477}