The new pool_alloc() failed to observe the maxpages limit; it now does.
[mmondor.git] / mmsoftware / mmlib / mmpool.c
/* $Id: mmpool.c,v 1.14 2004/05/22 12:29:05 mmondor Exp $ */

/*
 * Copyright (C) 2001-2004, Matthew Mondor
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Matthew Mondor.
 * 4. The name of Matthew Mondor may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 * 5. Redistribution of source code may not be released under the terms of
 *    any GNU Public License derivate.
 *
 * THIS SOFTWARE IS PROVIDED BY MATTHEW MONDOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL MATTHEW MONDOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */



#include <sys/types.h>
#include <stdlib.h>

#include <mmtypes.h>
#include <mmstring.h>
#include <mmlist.h>
#include <mmpool.h>
#include <mmlog.h>




MMCOPYRIGHT("@(#) Copyright (c) 2001-2004\n\
\tMatthew Mondor. All rights reserved.\n");
MMRCSID("$Id: mmpool.c,v 1.14 2004/05/22 12:29:05 mmondor Exp $");



/* DEFINITIONS */

#define BPAGE_SIZE	((size_t)OALIGN_CEIL(sizeof(bpage_t), long))



/* STATIC FUNCTION PROTOTYPES */

static bpage_t *pool_page_create(const char *, pool_t *);
static bpage_t *pool_page_destroy(bpage_t *);



/* STATIC FUNCTIONS */

/* Creates a new page of objects, and calls the constructor function for each
 * object of the new page if needed.  Returns NULL on failure, or the new
 * bpage_t pointer on success.
 */
static bpage_t *pool_page_create(const char *func, pool_t *pool)
{
	register size_t nodesize = pool->nodesize;
	register u_int8_t *ptr, *toptr;
	register bpage_t *page;
	register list_t *list;

	if ((page = pool->malloc(pool->pagesize)) == NULL) {
		DPRINTF(func, "Out of memory allocating page");
		return NULL;
	}

	/* Initialize bpage_t */
	page->magic = MAGIC_PAGE;
	page->pool = pool;
	DLIST_INIT(&page->objects);

	/* Create all objects of that bpage_t, inserting them into its list_t.
	 * If any object creation fails (its optional constructor function can
	 * fail), destroy the page and return NULL.
	 */
	if (pool->create != NULL) {
		for (ptr = toptr = (u_int8_t *)page, ptr += BPAGE_SIZE,
		    toptr += pool->pagesize, list = &page->objects;
		    ptr + nodesize < toptr; ptr += nodesize) {
			((pnode_t *)ptr)->magic = 0;
			((pnode_t *)ptr)->page = page;
			if (!pool->create((pnode_t *)ptr)) {
				DPRINTF(func,
				    "Page object constructor function returned error");
				return pool_page_destroy(page);
			}
			DLIST_APPEND(list, (node_t *)ptr);
		}
	} else {
		for (ptr = toptr = (u_int8_t *)page, ptr += BPAGE_SIZE,
		    toptr += pool->pagesize, list = &page->objects;
		    ptr + nodesize < toptr; ptr += nodesize) {
			((pnode_t *)ptr)->magic = 0;
			((pnode_t *)ptr)->page = page;
			DLIST_APPEND(list, (node_t *)ptr);
		}
	}

	return page;
}

/* Destroys a page previously created using pool_page_create(), calling the
 * destructor function for each object of the page if necessary.  Returns NULL.
 */
static bpage_t *pool_page_destroy(bpage_t *page)
{
	register pnode_t *pnode;
	register void (*destroy)(pnode_t *);

	if ((destroy = (page->pool->destroy)) != NULL) {
		/* We need to destroy all objects */
		DLIST_FOREACH(&page->objects, pnode)
			destroy(pnode);
	}

	page->pool->free(page);

	return NULL;
}



/* PUBLIC FUNCTIONS */

/* Initializes a memory pool for efficient management of fixed-size nodes.
 * Returns TRUE on success or FALSE on failure.
 */
bool pool_init(pool_t *pool, void *(*mallocfunc)(size_t),
    void (*freefunc)(void *), bool (*create)(pnode_t *),
    void (*destroy)(pnode_t *), size_t nodesize, u_int32_t nodesperpage,
    u_int32_t minpages, u_int32_t maxpages)
{
	bool ok = FALSE;

	if (pool != NULL && mallocfunc != NULL && freefunc != NULL &&
	    ((create != NULL && destroy != NULL) ||
	    (void *)create == (void *)destroy) &&
	    nodesize != 0 && nodesperpage > 0) {
		register size_t ns = (size_t)OALIGN_CEIL(nodesize, long);
		register size_t ps = (BPAGE_SIZE + ((nodesperpage + 1) * ns));

		pool->magic = 0;
		pool->malloc = mallocfunc;
		pool->free = freefunc;
		pool->create = create;
		pool->destroy = destroy;
		pool->nodesize = ns;
		pool->minpages = minpages;
		pool->maxpages = maxpages;
		pool->nodesperpage = nodesperpage;
		pool->pagesize = ps;
		pool->avgtotal = pool->avgcnt = minpages;
		DLIST_INIT(&pool->pages);
		DLIST_INIT(&pool->fpages);
		DLIST_INIT(&pool->epages);

		/* Allocate the minimum number of pages, if needed.  We insert
		 * them into the empty pages list; minpages will be honored so
		 * that the pool never frees pages below that count.
		 */
		for (; minpages > 0; minpages--) {
			register bpage_t *p;

			if ((p = pool_page_create("pool_init", pool)) == NULL)
				break;
			DLIST_APPEND(&pool->epages, (node_t *)p);
		}

		/* Validate this pool */
		pool->magic = MAGIC_POOL;

		/* Have we failed to allocate any needed pages?  If so, destroy
		 * the pool and return FALSE.
		 */
		if (minpages == 0)
			ok = TRUE;
		else if (minpages < pool->minpages)
			(void) pool_destroy(pool);

	} else
		DPRINTF("pool_init", "Invalid parameters");

	return ok;
}


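/* The following is an illustrative usage sketch added for this document; it
 * is not part of mmpool.c.  It shows how a caller might set up a pool with
 * pool_init() when no constructor/destructor is needed.  The struct conn,
 * conn_pool and conn_pool_setup names are hypothetical; the only requirement
 * taken from the code above is that each pooled object starts with a pnode_t,
 * since the pool writes the pnode_t header at the base of every node.
 */
#if 0
struct conn {
	pnode_t		node;		/* Must come first; managed by the pool */
	int		fd;
	char		buf[1024];
};

static pool_t	conn_pool;

static bool conn_pool_setup(void)
{
	/* 64 nodes per page, keep at least 2 pages, never grow beyond 128
	 * pages; passing NULL for both create and destroy is accepted by
	 * pool_init().
	 */
	return pool_init(&conn_pool, malloc, free, NULL, NULL,
	    sizeof(struct conn), 64, 2, 128);
}
#endif

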
/* Destroys a pool which was previously created by pool_init(), and frees
 * any resources it uses (all the nodes it created become invalid).
 * Returns TRUE on success, or FALSE if the pool pointer is invalid.
 */
bool pool_destroy(pool_t *pool)
{
	bool ok = FALSE;

	/* Make sure pool_t is valid */
	if (POOL_VALID(pool)) {
		register bpage_t *p, *t;

		/* Destroy all pages of all lists */
		for (p = DLIST_TOP(&pool->pages); p != NULL; p = t) {
			t = DLIST_NEXT(p);
			pool_page_destroy(p);
		}
		for (p = DLIST_TOP(&pool->fpages); p != NULL; p = t) {
			t = DLIST_NEXT(p);
			pool_page_destroy(p);
		}
		for (p = DLIST_TOP(&pool->epages); p != NULL; p = t) {
			t = DLIST_NEXT(p);
			pool_page_destroy(p);
		}

		/* Invalidate pool_t */
		pool->magic = 0;

		ok = TRUE;
	} else
		DPRINTF("pool_destroy", "Invalid pool_t pointer (%p)", pool);

	return ok;
}


/* Allows a single node to be allocated very efficiently from the specified
 * pool, optionally initializing its memory to zeros.  Returns the newly
 * allocated node pointer on success, or NULL on error.
 */
pnode_t *pool_alloc(pool_t *pool, bool zero)
{
	pnode_t *pnode = NULL;

	if (POOL_VALID(pool)) {
		if (!zero || pool->create == NULL) {
			register bpage_t *page;

			/* Are there any partially used pages? */
			if ((page = DLIST_TOP(&pool->pages)) == NULL) {
				/* No, we thus attempt to move a page from our
				 * empty pages cache back into our usable pages
				 * list.  The order of the usable pages list is
				 * irrelevant at DLIST_SWAP() since it will be
				 * the only node.
				 */
				if ((page = DLIST_TOP(&pool->epages)) == NULL) {
					/* No more free pages in our cache
					 * either.  If maxpages is unset (0), or
					 * is set but not yet reached, we need
					 * to create a new page.  If we can't,
					 * return NULL for error.
					 */
					if (pool->maxpages == 0 ||
					    (DLIST_NODES(&pool->pages) +
					    DLIST_NODES(&pool->fpages) +
					    DLIST_NODES(&pool->epages)) < pool->maxpages) {
						if ((page = pool_page_create("pool_alloc", pool))
						    == NULL)
							return NULL;
						DLIST_APPEND(&pool->pages, (node_t *)page);
					} else
						return NULL;
				} else
					DLIST_SWAP(&pool->pages, &pool->epages,
					    (node_t *)page, FALSE);
			}

			/* <page> now points to a page we know we can at least
			 * get a free object node from.  Obtain one and unlink
			 * it.
			 */
			pnode = DLIST_TOP(&page->objects);
			DLIST_UNLINK(&page->objects, (node_t *)pnode);

			/* Have we obtained the last available object from this
			 * page?  If so, move the page to the full pages list.
			 * The order of the full pages list nodes is irrelevant,
			 * since we don't know which of those pages are more
			 * likely to be swapped to the usable pages list first,
			 * and we won't run through that list, except at
			 * pool_destroy().
			 */
			if (DLIST_NODES(&page->objects) == 0)
				DLIST_SWAP(&pool->fpages, &pool->pages,
				    (node_t *)page, FALSE);

			/* If requested, zero the object.  This is stupid if a
			 * constructor and destructor are used, but can be
			 * useful otherwise.
			 */
			if (zero) {
				page = pnode->page;
				mm_memclr(pnode, pool->nodesize);
				pnode->page = page;
			}

			/* Mark this node as a valid allocated object */
			pnode->magic = MAGIC_PNODE;
		} else
			DPRINTF("pool_alloc",
			    "Will not zero a constructed object of pool (%p)", pool);
	} else
		DPRINTF("pool_alloc", "Invalid pool_t pointer (%p)", pool);

	return pnode;
}


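/* Illustrative sketch (not part of mmpool.c): allocating a node from the
 * hypothetical conn_pool shown earlier and releasing it again with
 * pool_free().  Note that, as the code above shows, pool_alloc() refuses
 * zero == TRUE when the pool has a constructor; conn_pool has none, so
 * requesting a zeroed node is fine here.
 */
#if 0
static struct conn *conn_get(void)
{
	/* pnode_t is the first member of struct conn, so the cast is valid */
	return (struct conn *)pool_alloc(&conn_pool, TRUE);
}

static void conn_put(struct conn *c)
{
	/* pool_free() always returns NULL */
	(void) pool_free(&c->node);
}
#endif

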
/* Efficiently frees the specified node back to the pool it was allocated from.
 * Returns NULL.  Keeps usage statistics as a heuristic to determine when the
 * memory blocks internally used by the pool should actually be shrunk, so that
 * frequently growing and shrinking pools will remain large for scalability.
 */
pnode_t *pool_free(pnode_t *pnode)
{
	if (PNODE_VALID(pnode)) {
		register bpage_t *page = pnode->page;
		register pool_t *pool = page->pool;
		register u_int32_t count;

		/* Invalidate object */
		pnode->magic = 0;

		/* Record how many nodes there currently are in our page's
		 * object list, to be used later on.
		 */
		count = DLIST_NODES(&page->objects);

		/* Add the node back to its page's object list.  Insert it so
		 * that we favor reuse of recently used objects soon.
		 */
		DLIST_INSERT(&page->objects, (node_t *)pnode);

		/* Was this page full before we inserted our node?  If so, the
		 * page is in the full pages list; move it to the usable pages
		 * list.  Insert it to favor reuse of recently used pages soon.
		 */
		if (count == 0)
			DLIST_SWAP(&pool->pages, &pool->fpages, (node_t *)page, TRUE);
		else {
			/* Did our node insertion cause the whole page to
			 * become free again?  If so, the page is on the usable
			 * pages list; move it back to the empty pages list.
			 * Insert it to favor reuse of recently used pages soon.
			 */
			if (++count == pool->nodesperpage)
				DLIST_SWAP(&pool->epages, &pool->pages,
				    (node_t *)page, TRUE);
		}

		if ((pool->minpages < pool->maxpages) ||
		    (pool->minpages == 0 && pool->maxpages == 0)) {
			register int exceeding;

			/* This pool_t is allowed to shrink.  Maintain an
			 * average of pages usage to prevent destroying the
			 * pages in our empty pages list unless we should.
			 */
			count = DLIST_NODES(&pool->pages) + DLIST_NODES(&pool->fpages);
			pool->avgtotal += count;
			pool->avgcnt++;
			/* XXX Is (pool->nodesperpage * 3) decent here? */
			if (pool->avgcnt > pool->nodesperpage * 3) {
				pool->avgcnt = 1;
				pool->avgtotal = count;
			}
			/* Do statistics suggest that we should shrink the pool?
			 * If so, free pages from our empty pages cache back to
			 * the system, destroying their objects if necessary.
			 */
			if ((exceeding = (count + DLIST_NODES(&pool->epages)) -
			    (pool->avgtotal / pool->avgcnt)) > 0) {
				register list_t *epages = &pool->epages;

				/* Preferably free pages which haven't been
				 * used recently.
				 */
				for (; exceeding > 0 &&
				    (page = DLIST_BOTTOM(epages)) != NULL;
				    exceeding--) {
					DLIST_UNLINK(epages, (node_t *)page);
					pool_page_destroy(page);
				}
			}
		}
	} else
		DPRINTF("pool_free", "Invalid pnode_t pointer (%p)", pnode);

	return NULL;
}
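

/* Illustrative sketch (not part of mmpool.c): a pool using the optional
 * create/destroy callbacks, matching the bool (*create)(pnode_t *) and
 * void (*destroy)(pnode_t *) signatures accepted by pool_init().  The
 * struct bigbuf and helper names are hypothetical.
 */
#if 0
struct bigbuf {
	pnode_t		node;		/* Must come first; managed by the pool */
	u_int8_t	*data;		/* Resource set up by the constructor */
};

static bool bigbuf_create(pnode_t *pnode)
{
	struct bigbuf *b = (struct bigbuf *)pnode;

	/* A failing constructor makes pool_page_create() destroy the page */
	return ((b->data = malloc(65536)) != NULL);
}

static void bigbuf_destroy(pnode_t *pnode)
{
	struct bigbuf *b = (struct bigbuf *)pnode;

	free(b->data);
}

static pool_t	bigbuf_pool;

static bool bigbuf_pool_setup(void)
{
	/* 16 nodes per page, keep at least 1 page, maxpages == 0 (no limit) */
	return pool_init(&bigbuf_pool, malloc, free, bigbuf_create,
	    bigbuf_destroy, sizeof(struct bigbuf), 16, 1, 0);
}
#endif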