/* $Id: mmpool.c,v 1.1 2003/03/28 21:09:29 mmondor Exp $ */

/*
 * Copyright (C) 2001-2003, Matthew Mondor
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software written by Matthew Mondor.
 * 4. The name of Matthew Mondor may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY MATTHEW MONDOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL MATTHEW MONDOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/types.h>
#include <stdlib.h>
#include <syslog.h>

#include <mmtypes.h>
#include <mmstring.h>
#include <mmlist.h>
#include <mmpool.h>


MMCOPYRIGHT("@(#) Copyright (c) 2001-2003\n\
\tMatthew Mondor. All rights reserved.\n");
MMRCSID("$Id: mmpool.c,v 1.1 2003/03/28 21:09:29 mmondor Exp $");

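/*
 * Usage sketch (illustrative only; "struct message", its field names and
 * the sizes below are assumptions, not part of this library). Judging by
 * how pool_alloc() preserves the page pointer across zeroing and how
 * pool_free() relies on the node's magic and page fields, client objects
 * are expected to embed a pnode_t header as their first member:
 *
 *    struct message {
 *        pnode_t         node;
 *        char            data[128];
 *    };
 *
 *    pool_t            msgpool;
 *    struct message    *m;
 *
 *    if (pool_init(&msgpool, malloc, free, sizeof(struct message),
 *        64, 1, 0)) {
 *        if ((m = (struct message *)pool_alloc(&msgpool, TRUE)) != NULL) {
 *            m->data[0] = '\0';
 *            pool_free((pnode_t *)m);
 *        }
 *        pool_destroy(&msgpool);
 *    }
 */
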
bool pool_init(pool_t *pool, void *(*mallocfunc)(size_t),
    void (*freefunc)(void *), size_t nodesize, u_int32_t nodesperpage,
    u_int32_t minpages, u_int32_t maxpages)
{
    bool ok = FALSE;

    if (pool != NULL && mallocfunc != NULL && freefunc != NULL &&
        nodesize != 0 && nodesperpage > 0) {
        register size_t bps = (size_t)OALIGN_CEIL(sizeof(bpage_t), long);
        register size_t ns = (size_t)OALIGN_CEIL(nodesize, long);
        register size_t ps = (bps + ((nodesperpage + 1) * ns));
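
        /*
         * Page layout: a long-aligned bpage_t header (bps) followed by
         * nodesperpage node slots of ns bytes each. The (nodesperpage + 1)
         * factor leaves one extra ns of slack, which lets the carving
         * loops below use a strict "ptr + ns < toptr" bound and still
         * yield exactly nodesperpage nodes per page.
         */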

        pool->magic = 0;
        pool->malloc = mallocfunc;
        pool->free = freefunc;
        pool->nodesize = ns;
        pool->minpages = minpages;
        pool->maxpages = maxpages;
        pool->nodesperpage = nodesperpage;
        pool->pagesize = ps;
        pool->avgtotal = pool->avgcnt = minpages;
        LIST_INIT(&pool->pages);
        LIST_INIT(&pool->fpages);
        LIST_INIT(&pool->nodes);
        /* Allocate minimum number of pages, if needed */
        for (; minpages > 0; minpages--) {
            register bpage_t *p;

            if ((p = mallocfunc(ps)) != NULL) {
                register u_int8_t *ptr, *toptr;

                p->pool = pool;
                p->magic = MAGIC_PAGE;
                p->pnodes = nodesperpage;
                LIST_APPEND(&pool->pages, (node_t *)p);
                for (ptr = toptr = (u_int8_t *)p, ptr += bps, toptr += ps;
                    ptr + ns < toptr; ptr += ns) {
                    ((pnode_t *)ptr)->page = p;
                    ((pnode_t *)ptr)->magic = 0;
                    LIST_APPEND(&pool->nodes, (node_t *)ptr);
                }
            } else
                break;
        }
        pool->magic = MAGIC_POOL;
        if (minpages == 0)
            ok = TRUE;
        else {
            pool_destroy(pool);
            syslog(LOG_NOTICE, "* pool_init() - Out of memory");
        }
    } else
        syslog(LOG_NOTICE, "* pool_init() - Invalid parameters");

    return ok;
}


bool pool_destroy(pool_t *pool)
{
    bool ok = FALSE;

    if (POOL_VALID(pool)) {
        register node_t *p, *t;

        for (p = pool->pages.top; p != NULL; p = t) {
            t = p->next;
            ((bpage_t *)p)->magic = 0;
            pool->free(p);
        }
        for (p = pool->fpages.top; p != NULL; p = t) {
            t = p->next;
            ((bpage_t *)p)->magic = 0;
            pool->free(p);
        }
        pool->magic = 0;
        ok = TRUE;
    } else
        syslog(LOG_NOTICE, "- pool_destroy() - Invalid pool_t pointer");

    return ok;
}


pnode_t *pool_alloc(pool_t *pool, bool zero)
{
    pnode_t *pnode = NULL;

    if (POOL_VALID(pool)) {
        register pnode_t *pn;

        /* If there are pre-buffered nodes, simply return the first one. */
        if ((pn = (pnode_t *)pool->nodes.top) != NULL) {
            LIST_UNLINK(&pool->nodes, (node_t *)pn);
            pn->page->pnodes--;
            if (zero) {
                register bpage_t *p;

                p = pn->page;
                mm_memclr(pn, pool->nodesize);
                pn->page = p;
            }
            pn->magic = MAGIC_PNODE;
            pnode = pn;
        } else {
            register bpage_t *p = NULL;

            /* No pnode_t is left, so we need to allocate a new bpage_t and
             * initialize the pnode_t objects it provides. First check
             * whether a page is already available in our cache, which
             * pool_free() maintains using statistics to minimize calls to
             * the page allocation primitives. If there is none, allocate
             * a new bpage_t.
             */
            if (pool->maxpages == 0 || pool->pages.nodes < pool->maxpages) {
                if ((p = (bpage_t *)pool->fpages.top) != NULL)
                    LIST_UNLINK(&pool->fpages, (node_t *)p);
                else
                    p = pool->malloc(pool->pagesize);
                if (p != NULL) {
                    register u_int8_t *ptr, *toptr;
                    register size_t ns = pool->nodesize;
                    register size_t ps = pool->pagesize;
                    register size_t bps = (size_t)OALIGN_CEIL(sizeof(bpage_t),
                        long);

                    p->pool = pool;
                    p->magic = MAGIC_PAGE;
                    p->pnodes = pool->nodesperpage;
                    LIST_APPEND(&pool->pages, (node_t *)p);
                    for (ptr = toptr = (u_int8_t *)p, ptr += bps, toptr += ps;
                        ptr + ns < toptr; ptr += ns) {
                        ((pnode_t *)ptr)->page = p;
                        ((pnode_t *)ptr)->magic = 0;
                        LIST_APPEND(&pool->nodes, (node_t *)ptr);
                    }
                    /* Now grab first pnode_t */
                    pn = (pnode_t *)pool->nodes.top;
                    LIST_UNLINK(&pool->nodes, (node_t *)pn);
                    p->pnodes--;
                    if (zero)
                        mm_memclr(pn, ns);
                    pn->magic = MAGIC_PNODE;
                    pn->page = p;
                    pnode = pn;
                } else
                    syslog(LOG_NOTICE, "* pool_alloc() - Out of memory");
            } else
                syslog(LOG_NOTICE, "- pool_alloc() - Out of nodes (%u total)",
                    pool->nodesperpage * pool->maxpages);
        }
    } else
        syslog(LOG_NOTICE, "* pool_alloc() - Invalid pool_t pointer");

    return pnode;
}


pnode_t *pool_free(pnode_t *pnode)
{
    if (PNODE_VALID(pnode)) {
        register bpage_t *p = pnode->page;
        register pool_t *pool = p->pool;
        register u_int32_t exceeding;

        /* Efficiently return this node to the free list. Insert it so
         * that recently used nodes are favored for reuse.
         */
        pnode->magic = 0;
        LIST_INSERT(&pool->nodes, (node_t *)pnode);
        p->pnodes++;
        if ((pool->minpages < pool->maxpages) ||
            (pool->minpages == 0 && pool->maxpages == 0)) {
            register u_int32_t pages = pool->pages.nodes;

            /* This pool_t is allowed to shrink, so book-keep statistics
             * on average page usage.
             */
            pool->avgtotal += pages;
            pool->avgcnt++;
            if (pool->avgcnt > pool->nodesperpage * 3) {
                pool->avgcnt = 1;
                pool->avgtotal = pages;
            }
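
            /*
             * The running average above is windowed: after roughly
             * 3 * nodesperpage frees the counters restart from the most
             * recent sample, so the average tracks recent page usage
             * rather than the whole lifetime of the pool. (For example,
             * with nodesperpage == 64 the window restarts about every
             * 192 calls to pool_free() on a shrinkable pool.)
             */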

            if (p->pnodes == pool->nodesperpage && pool->minpages < pages) {
                register u_int8_t *ptr, *toptr;
                register size_t ns = pool->nodesize;

                /* All pnode_t objects belonging to this bpage_t were freed.
                 * Move the page to the free-page cache, from which it may
                 * later be released to the system. We also need to
                 * sequentially unlink all the pnode_t objects this page
                 * supplied from the free nodes list_t.
                 */
                for (ptr = toptr = (u_int8_t *)p,
                    ptr += (size_t)OALIGN_CEIL(sizeof(bpage_t), long),
                    toptr += pool->pagesize; ptr + ns < toptr;
                    ptr += ns)
                    LIST_UNLINK(&pool->nodes, (node_t *)ptr);
                /* Insert to preferably reuse recently used pages */
                p->magic = 0;
                LIST_SWAP(&pool->fpages, &pool->pages, (node_t *)p, TRUE);
            }
            /* Do statistics suggest that we should shrink the pool? If so,
             * free pages from our cache back to the system.
             */
            if ((exceeding = (pool->pages.nodes + pool->fpages.nodes) -
                (pool->avgtotal / pool->avgcnt)) > 0) {
                register list_t *fpages = &pool->fpages;
                register node_t *n;

                /* Preferably free pages which haven't been used recently */
                for (; exceeding > 0 && (n = fpages->bottom) != NULL;
                    exceeding--) {
                    LIST_UNLINK(fpages, n);
                    pool->free(n);
                }
            }
        }
    } else
        syslog(LOG_NOTICE, "* pool_free() - Invalid pnode_t pointer");

    return NULL;
}