Modifications to use the new mmlimitrate(3) API
[mmondor.git] / mmsoftware / mmlib / mmserver.c
CommitLineData
da634739 1/* $Id: mmserver.c,v 1.19 2003/10/23 01:01:25 mmondor Exp $ */
47071c2b
MM
2
3/*
5eb34fba 4 * Copyright (C) 2000-2003, Matthew Mondor
47071c2b
MM
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software written by Matthew Mondor.
18 * 4. The name of Matthew Mondor may not be used to endorse or promote
19 * products derived from this software without specific prior written
20 * permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY MATTHEW MONDOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL MATTHEW MONDOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34
35
36
37/* HEADERS */
38
39#include <sys/types.h>
40#include <sys/wait.h>
41#include <sys/stat.h>
42#include <fcntl.h>
43#include <stdlib.h>
44#include <unistd.h>
45#include <stdio.h>
46
47#include <netdb.h>
48#include <netinet/in.h>
49#include <netinet/in_systm.h>
50#include <netinet/ip.h>
51#include <netinet/tcp.h>
52#include <sys/socket.h>
53#include <arpa/inet.h>
54#include <arpa/nameser.h>
55#include <resolv.h>
56
57#include <syslog.h>
58
59#include <pth.h>
60#include <signal.h>
61#include <sys/poll.h>
62#include <time.h>
63
64#include <mmstr.h>
65#include <mmstring.h>
66#include <mmfd.h>
67#include <mmlist.h>
904cd663
MM
68#include <mmpool.h>
69#include <mmhash.h>
47071c2b
MM
70#include <mmserver.h>
71#include <mmreadcfg.h>
62332f2c 72#include <mmlimitrate.h>
2556ec01 73#include <mmlog.h>
47071c2b
MM
74
75
76
77
5eb34fba 78MMCOPYRIGHT("@(#) Copyright (c) 2002-2003\n\
47071c2b 79\tMatthew Mondor. All rights reserved.\n");
da634739 80MMRCSID("$Id: mmserver.c,v 1.19 2003/10/23 01:01:25 mmondor Exp $");
47071c2b
MM
81
82
83
84
85/* DEFINITIONS */
86
87
88
89
90/* STATIC PROTOTYPES */
91
92static void sighandler(int);
93static struct iface *parse_ifaces(char *, char *);
e2a16cfa 94static void *phandleclient(void *);
47071c2b 95static void writepidfile(const char *);
e2a16cfa 96static void *async_thread(void *);
6bd3b87c 97static bool spawn_async_procs(uid_t, gid_t *, int, size_t);
47071c2b
MM
98static void async_proc_main(int);
99static void async_resolvehostname(struct async_msg *);
100
904cd663
MM
101static u_int32_t clnode_keyhash(const void *, size_t);
102static int clnode_keycmp(const void *, const void *, size_t);
103static void *clnode_expire_thread(void *);
104static bool clnode_expire_thread_iterator(hashnode_t *, void *);
105
47071c2b
MM
106
107
108
109/* GLOBAL VARIABLES */
110
111static int (*handleclientfunc)(unsigned long, int, clientlistnode *,
112 struct iface *, struct async_clenv *);
904cd663 113static pth_mutex_t ctable_lock, ppool_lock;
7a56f31f 114static pool_t cpool, ppool;
904cd663 115static hashtable_t ctable;
47071c2b
MM
116static struct async_env *async = NULL;
117
118/* Useful so that daemons can log standard disconnection status easily */
f36e2a66 119const char *const mms_reasons[] = {
47071c2b
MM
120 "NORMAL",
121 "INPUT_TIMEOUT",
122 "OUTPUT_TIMEOUT",
123 "INPUT_ERROR",
124 "OUTPUT_ERROR",
125 "RESOURCE_ERROR",
126 "NOT_AVAILABLE",
127 "MANY_ERRORS",
128 "CONNECTION_RATE_EXCEEDED", /* These three used internally */
129 "NUMBER_OF_ADDRESSES_EXCEEDED",
130 "CONNECTIONS_PER_ADDRESS_EXCEEDED",
131 "UNKNOWN",
132 NULL
133};
134
135
136
137
138/* MAIN DAEMON PROGRAM */
139
140/* Before calling this function, the async_init() and async_init_pth()
141 * functions should have been called. Before any async_*() or tcp_server()
142 * calls the process is expected to have called make_daemon().
143 */
144void
145tcp_server(char *message, char *server_names, char *listen_ips, uid_t uid,
146 gid_t *gids, int ngids, long maxips, long maxperip, long ratemax,
147 long rateper, long timeout, int port, bool resolve,
148 int (*handleclient1)(unsigned long, int, clientlistnode *,
149 struct iface *, struct async_clenv *))
150{
151 struct sockaddr addr;
152 struct sockaddr_in server, *sinaddr;
153 socklen_t addrl;
154 clientlistnode *clnode;
904cd663 155 int msgsock, ret, msglen, nifaces, nifaces2, i, reason;
47071c2b
MM
156 bool ok;
157 struct pollfd *fds;
158 struct ifacendx *fdsi;
159 struct iface *ifaces, *tif;
160 unsigned long id;
904cd663
MM
161 pth_t thread, clnode_thread;
162 pth_attr_t threadattr, clnode_threadattr;
47071c2b
MM
163
164 sinaddr = (struct sockaddr_in *)&addr;
165 handleclientfunc = handleclient1;
166 id = (unsigned long)time(NULL);
167 msgsock = -1;
168 msglen = mm_strlen(message);
169 fds = NULL;
170 fdsi = NULL;
171 ifaces = NULL;
172
173 /* Pth related */
174 threadattr = pth_attr_new();
175 pth_attr_set(threadattr, PTH_ATTR_JOINABLE, FALSE);
904cd663
MM
176 clnode_threadattr = pth_attr_new();
177 pth_attr_set(clnode_threadattr, PTH_ATTR_JOINABLE, TRUE);
178 pth_mutex_init(&ctable_lock);
7a56f31f 179 pth_mutex_init(&ppool_lock);
47071c2b
MM
180
181 /* Used by resolve_hostname() */
7a56f31f 182 if (!mmstrinit(malloc, free, 16384)) {
460b991b 183 DPRINTF("tcp_server", "mmstrinit()");
47071c2b
MM
184 closelog();
185 exit(-1);
186 }
187
188 if ((ifaces = parse_ifaces(server_names, listen_ips)) == NULL) {
189 closelog();
190 mmstrexit();
191 exit(-1);
192 }
193
7a56f31f
MM
194 if (!pool_init(&cpool, malloc, free, sizeof(clientlistnode),
195 65536 / sizeof(clientlistnode), 0, 0) ||
196 !pool_init(&ppool, malloc, free, sizeof(phandleinfo),
904cd663 197 16384 / sizeof(phandleinfo), 0, 0) ||
097ff059
MM
198 !hashtable_init(&ctable, maxips, 1, malloc, free, clnode_keycmp,
199 clnode_keyhash, FALSE) ||
904cd663
MM
200 ((clnode_thread = pth_spawn(clnode_threadattr,
201 clnode_expire_thread, &rateper)) == NULL)) {
460b991b 202 DPRINTF("tcp_server", "Out of memory");
47071c2b
MM
203 closelog();
204 mmstrexit();
205 free(ifaces);
206 exit(-1);
207 }
208
209 /* Bind the port we should listen to, with all specified addresses */
917e9cbb 210 for (ret = nifaces = 0, tif = ifaces; *(tif->hostname); tif++) {
47071c2b
MM
211 if ((tif->sock = socket(AF_INET, SOCK_STREAM, 0)) > -1) {
212 int i1 = 1;
917e9cbb 213
47071c2b
MM
214 if ((setsockopt(tif->sock, SOL_SOCKET, SO_REUSEADDR, &i1,
215 sizeof(int))) == -1)
460b991b 216 DPRINTF("tcp_server", "Error on setsockopt(%s:%d) - (%s)",
47071c2b
MM
217 tif->address_str, port, tif->hostname);
218 mm_memclr(&server, sizeof(struct sockaddr_in));
219 server.sin_family = AF_INET;
220 server.sin_addr = tif->address.sin_addr;
221 server.sin_port = htons(port);
222 if ((bind(tif->sock, (struct sockaddr *)(void *)&server,
223 sizeof(struct sockaddr_in))) == 0)
224 nifaces++;
225 else {
226 syslog(LOG_NOTICE,
2556ec01 227 "tcp_server() - Error on bind(%s:%d) - (%s)",
47071c2b
MM
228 tif->address_str, port, tif->hostname);
229 close(tif->sock);
230 tif->sock = -1;
231 }
232 }
47071c2b
MM
233 }
234
235 /* Drop privileges and start listening to all bound ports */
f36e2a66 236 if (nifaces > 0) {
47071c2b 237 if (mmdropprivs(uid, gids, ngids)) {
917e9cbb 238 for (tif = ifaces; *(tif->hostname); tif++) {
47071c2b
MM
239 if (tif->sock != -1) {
240 if ((listen(tif->sock, maxips * maxperip)) != 0) {
460b991b 241 DPRINTF("tcp_server", "listen(%s)", tif->hostname);
47071c2b
MM
242 close(tif->sock);
243 tif->sock = -1;
244 nifaces--;
245 } else
246 syslog(LOG_NOTICE, "Listening on [%s:%d] - (%s)",
247 tif->address_str, port, tif->hostname);
248 }
47071c2b
MM
249 }
250 } else {
251 syslog(LOG_NOTICE,
2556ec01 252 "tcp_server() - Cannot change uid and gid to safe privs");
47071c2b
MM
253 ret = -1;
254 }
255 } else {
256 syslog(LOG_NOTICE,
2556ec01 257 "tcp_server() - no interfaces (left) to listen to");
47071c2b
MM
258 ret = -1;
259 }
260
261 if (!ret) {
262 /* Setup our poll() array, with corresponding index array */
263 if ((fds = malloc((sizeof(struct pollfd) * nifaces) +
264 (sizeof(struct ifacendx) * nifaces))) != NULL) {
917e9cbb
MM
265 for (fdsi = (struct ifacendx *)&(fds[nifaces]), i = 0,
266 tif = ifaces;
267 *(tif->hostname); tif++) {
47071c2b
MM
268 if (tif->sock != -1) {
269 fds[i].fd = tif->sock;
270 fds[i].events = POLLIN;
271 fds[i].revents = 0;
272 fdsi[i].iface = tif;
273 i++;
274 }
47071c2b
MM
275 }
276 } else {
460b991b 277 DPRINTF("tcp_server", "Out of memory");
47071c2b
MM
278 ret = -1;
279 }
280 } else
281 ret = -1;
282
283 if (ret) {
284 free(ifaces);
285 mmstrexit();
904cd663
MM
286 if (clnode_thread != NULL) {
287 pth_abort(clnode_thread);
288 pth_join(clnode_thread, NULL);
289 }
62332f2c
MM
290 if (HASHTABLE_VALID(&ctable))
291 hashtable_destroy(&ctable, FALSE);
292 if (POOL_VALID(&ppool))
293 pool_destroy(&ppool);
294 if (POOL_VALID(&cpool))
295 pool_destroy(&cpool);
47071c2b
MM
296 exit(ret);
297 }
298
299 /* Start answering and dispatching connections */
300 syslog(LOG_NOTICE, "Started for uid: %d", uid);
301 for (;;) {
302 if ((nifaces2 = pth_poll(fds, nifaces, -1)) > 0) {
303 /* Loop but once only, and only for long as nifaces2 times.
304 * Use our fast index to reference to the interface structure.
305 */
306 for (i = 0; i < nifaces && nifaces2; i++) {
307 if (fds[i].revents & POLLIN) {
308 nifaces2--;
309 addrl = sizeof(struct sockaddr);
310 if ((msgsock = pth_accept(fds[i].fd, &addr, &addrl))
311 != -1) {
904cd663
MM
312 /* Make sure that we respect connection and rate
313 * limits.
47071c2b 314 */
47071c2b
MM
315 ok = FALSE;
316 reason = MMS_NORMAL;
904cd663 317 pth_mutex_acquire(&ctable_lock, FALSE, NULL);
1c253c13
MM
318 if ((clnode = (clientlistnode *)hashtable_lookup(
319 &ctable, &sinaddr->sin_addr.s_addr,
904cd663
MM
320 sizeof(u_int32_t))) == NULL) {
321 /* Create new node */
322 if (HASHTABLE_NODES(&ctable) < maxips) {
7a56f31f
MM
323 if ((clnode = (clientlistnode *)pool_alloc(
324 &cpool, FALSE)) != NULL) {
47071c2b 325 clnode->hostname = NULL;
62332f2c
MM
326 clnode->connections = 0;
327 if (ratemax != 0)
328 LR_INIT(&clnode->lr, ratemax,
329 (time_t)rateper, time(NULL));
47071c2b
MM
330 clnode->timeout = (timeout * 1000);
331 clnode->client = addr;
332 clnode->resolve = resolve;
904cd663
MM
333 hashtable_link(&ctable,
334 (hashnode_t *)clnode,
335 &((struct sockaddr_in *)&clnode->
336 client)->sin_addr.s_addr,
337 sizeof(u_int32_t));
47071c2b 338 } else
460b991b 339 DPRINTF("tcp_server", "Out of memory");
904cd663
MM
340 } else
341 reason = MMS_ADDRESSES_EXCEEDED;
342 }
343 if (clnode != NULL) {
344 /* Either we found an existing node or we created
345 * it successfully
346 */
347 if (clnode->connections < maxperip) {
348 if (ratemax != 0) {
62332f2c
MM
349 if (!(ok = lr_allow(&clnode->lr, 1, 0,
350 FALSE)))
904cd663
MM
351 reason = MMS_RATE_EXCEEDED;
352 } else
353 ok = TRUE;
354 } else
355 reason = MMS_CONPERADDRESS_EXCEEDED;
47071c2b 356 }
904cd663 357 pth_mutex_release(&ctable_lock);
47071c2b
MM
358
359 if (ok) {
360 phandleinfo *phi;
361
362 /* Start thread to serve the client */
7a56f31f
MM
363 pth_mutex_acquire(&ppool_lock, FALSE, NULL);
364 phi = (phandleinfo *)pool_alloc(&ppool, FALSE);
365 pth_mutex_release(&ppool_lock);
47071c2b
MM
366 if (phi != NULL) {
367 id++;
368 phi->id = id;
369 phi->socket = msgsock;
370 phi->clnode = clnode;
371 /* Indexed reference */
372 phi->iface = fdsi[i].iface;
373 if ((thread = pth_spawn(threadattr,
374 phandleclient,
375 phi)) != NULL)
376 clnode->connections++;
377 else {
378 if (phi != NULL) {
7a56f31f 379 pth_mutex_acquire(&ppool_lock, FALSE,
47071c2b 380 NULL);
7a56f31f
MM
381 pool_free((pnode_t *)phi);
382 pth_mutex_release(&ppool_lock);
47071c2b 383 }
460b991b
MM
384 DPRINTF("tcp_server",
385 "Error launching thread");
47071c2b
MM
386 }
387 } else
460b991b 388 DPRINTF("tcp_server", "Out of memory");
47071c2b
MM
389 } else {
390 char ipaddr[20];
391
392 /* Close down connection with client
393 * immediately, sending message.
394 */
395 write(msgsock, message, msglen);
396 shutdown(msgsock, 2);
397 close(msgsock);
398 mm_strcpy(ipaddr, "0.0.0.0");
399 inet_ntop(AF_INET, &(sinaddr->sin_addr), ipaddr,
400 19);
401 if (reason != MMS_NORMAL)
2556ec01 402 syslog(LOG_NOTICE, "tcp_server() - [%s] - %s",
5eb34fba 403 ipaddr, MMS_RSTRING(reason));
47071c2b
MM
404 }
405
904cd663 406 pth_mutex_release(&ctable_lock);
47071c2b
MM
407
408 } else
460b991b 409 DPRINTF("tcp_server", "pth_accept()");
47071c2b
MM
410 }
411 }
412 }
413 }
414
415 free(ifaces);
416 mmstrexit();
904cd663
MM
417 pth_abort(clnode_thread);
418 pth_join(clnode_thread, NULL);
419 hashtable_destroy(&ctable, FALSE);
7a56f31f
MM
420 pool_destroy(&ppool);
421 pool_destroy(&cpool);
47071c2b 422 /* Unrequired for Pth (non-existing even)
7a56f31f 423 * pth_mutex_destroy(&apool_lock);
904cd663 424 * pth_mutex_destroy(&ctable_lock);
7a56f31f 425 * pth_mutex_destroy(&ppool_lock);
47071c2b
MM
426 */
427
428 exit(ret);
429}
430
431
/* Writes our process ID number to specified file. To be called before
 * chroot(2) or dropping privileges, so the path is resolved against the
 * original root and the file is still writable.
 */
static void
writepidfile(const char *file)
{
    char str[16];
    int fd;

    if ((fd = open(file, O_CREAT | O_TRUNC | O_WRONLY, 0600)) != -1) {
	/* BUGFIX: pass the full buffer size to snprintf(3) instead of a
	 * hard-coded 15, and cast getpid() explicitly since pid_t may be
	 * wider than int on some systems.
	 */
	snprintf(str, sizeof(str), "%d\n", (int)getpid());
	write(fd, str, mm_strlen(str));
	close(fd);
    } else
	DPRINTF("writepidfile", "open(%s)", file);
}
448
449
/* Uses fork() to become a daemon, and detaches from the current tty.
 * Returns either successful or not.
 *
 * In the parent the function exit(3)s; only the child returns TRUE.
 * The child: creates a new session, redirects stdio to /dev/null, writes
 * the pid file, optionally chroot(2)s into `jail', installs the signal
 * handlers and clears the umask. Returns FALSE only if fork(2) failed.
 * When built with NODETACH no fork is performed (debugging aid).
 */
bool
make_daemon(const char *pidfile, const char *jail)
{
    int pid;

#ifdef NODETACH
    pid = getpid();
#else
    if ((pid = fork()) != -1)
#endif
    {

#ifndef NODETACH
	if (pid > 0) {
	    /* Parent */
	    exit(0);
	} else
#endif
	{
	    int fd;
	    struct sigaction act;

	    /* Child */
	    setsid();
	    chdir("/");
	    /* Detach from the controlling tty by pointing fds 0-2 at
	     * /dev/null; only close fd if it isn't one of them.
	     */
	    if ((fd = open("/dev/null", O_RDWR)) != -1) {
		dup2(fd, 0);
		dup2(fd, 1);
		dup2(fd, 2);
		if (fd > 2)
		    close(fd);
	    }
	    /* Must happen before chroot(2)/privilege drop below. */
	    writepidfile(pidfile);

	    if (jail && *jail) {
		if ((chroot(jail)) == -1) {
		    syslog(LOG_NOTICE,
			"make_daemon() - Could not chroot(2) to '%s'",
			jail);
		    exit(-1);
		}
		/* cwd must be inside the new root. */
		chdir("/");
	    }

	    /* Setup our break handler and ignore wanted signals */
	    act.sa_handler = sighandler;
	    act.sa_flags = SA_NOCLDWAIT;
	    sigemptyset(&act.sa_mask);
	    sigaction(SIGTERM, &act, NULL);
	    sigaction(SIGCHLD, &act, NULL);
	    sigaction(SIGSEGV, &act, NULL);
	    signal(SIGTTOU, SIG_IGN);
	    signal(SIGTTIN, SIG_IGN);
	    signal(SIGTSTP, SIG_IGN);
	    /* Writes to dead sockets must return EPIPE, not kill us. */
	    signal(SIGPIPE, SIG_IGN);

	    umask(0);

#ifndef __GLIBC__
	    setproctitle("Main threaded process");
#endif

	    return (TRUE);
	}
    }

    return (FALSE);
}
521
522
/* Process signal handler: terminates the whole process group on SIGSEGV
 * and SIGTERM (after logging), and reaps exited children on SIGCHLD.
 */
static void
sighandler(int sig)
{
    if (sig == SIGSEGV) {
	syslog(LOG_NOTICE, "Exiting (SIGSEGV)");
	kill(0, SIGTERM);
	exit(0);
    } else if (sig == SIGTERM) {
	syslog(LOG_NOTICE, "Exiting (SIGTERM)");
	/* Ignore the SIGTERM we are about to broadcast to ourselves. */
	signal(SIGTERM, SIG_IGN);
	kill(0, SIGTERM);
	exit(0);
    } else if (sig == SIGCHLD) {
	int status = 0;

	/* Reap every exited child without blocking. */
	while (wait3(&status, WNOHANG, NULL) > 0)
	    ;
    }
}
546
547
548static struct iface *
549parse_ifaces(char *hostnames, char *addresses)
550{
551 struct iface *ifaces;
552 char *hosts[65], *ips[65];
553 struct sockaddr_in saddr;
554 int n, i;
555
556 ifaces = NULL;
557
f36e2a66
MM
558 if (hostnames != NULL && addresses != NULL && *hostnames != '\0' &&
559 *addresses != '\0') {
47071c2b
MM
560 if ((n = mm_straspl(hosts, hostnames, 64)) ==
561 mm_straspl(ips, addresses, 64)) {
562 if ((ifaces = malloc(sizeof(struct iface) * (n + 1))) != NULL) {
563 /* Setup our array */
564 for (i = 0; i < n; i++) {
565 mm_memclr(&saddr, sizeof(struct sockaddr_in));
566 if ((inet_aton(ips[i], &(saddr.sin_addr))) == 1) {
567 mm_strncpy(ifaces[i].hostname, hosts[i], 64);
568 mm_strncpy(ifaces[i].address_str, ips[i], 15);
569 ifaces[i].sock = -1;
570 ifaces[i].address = saddr;
571 *(ifaces[i + 1].hostname) = '\0';
572 } else {
573 syslog(LOG_NOTICE,
2556ec01 574 "parse_ifaces() - Invalid address [%s]",
47071c2b
MM
575 ips[i]);
576 free(ifaces);
577 ifaces = NULL;
578 break;
579 }
580 }
581 }
582 } else
583 syslog(LOG_NOTICE,
2556ec01 584 "parse_ifaces() - Number of hostnames and addresses mismatch");
47071c2b
MM
585 }
586
587 return (ifaces);
588}
589
590
591/* SERVER (THREAD) */
592
/* This is started by pth_spawn(), it calls the user handler function, which
 * only has to care about serving the client.
 *
 * Receives a phandleinfo via `args' (ownership transferred: it is returned
 * to ppool immediately after its fields are copied out). On exit the
 * client socket is closed and the clnode connection refcount is decreased;
 * the clnode itself is uncached later by clnode_expire_thread().
 */
static void *
phandleclient(void *args)
{
    phandleinfo *phi;
    int socket, ret;
    clientlistnode *clnode;
    char *tmp;
    struct iface *iface;
    struct async_clenv *aclenv;
    unsigned long id;

    /* Get and discard our passed parameters */
    phi = (phandleinfo *)args;
    id = phi->id;
    socket = phi->socket;
    clnode = phi->clnode;
    iface = phi->iface;
    pth_mutex_acquire(&ppool_lock, FALSE, NULL);
    pool_free((pnode_t *)phi);
    pth_mutex_release(&ppool_lock);

    ret = 0;

    if ((aclenv = async_open_clenv())) {
	if (clnode->resolve) {
	    /* We want to resolve the client's IP address' hostname, but not
	     * if it already was done for another client currently logged on
	     * from that same address.
	     */
	    tmp = NULL;
	    /* We need to determine if hostname should be resolved. */
	    if (clnode->hostname == NULL) {
		/* It seems so, so let's do it without clobbering the mutex */
		tmp = resolve_hostname(aclenv, &(clnode->client));
	    }
	    if (tmp) {
		/* Lock the mutex for a very short moment */
		pth_mutex_acquire(&ctable_lock, FALSE, NULL);
		/* Verify again for NULL to avoid a potential memory leak:
		 * another thread may have resolved the same address while
		 * we were blocked in resolve_hostname() (double-checked
		 * publication under the lock).
		 */
		if(clnode->hostname == NULL)
		    clnode->hostname = tmp;
		else
		    tmp = mmstrfree(tmp);
		pth_mutex_release(&ctable_lock);
	    }
	}

	/* Handle the client */
	ret = handleclientfunc(id, socket, clnode, iface, aclenv);

	async_close_clenv(aclenv);
    } else
	DPRINTF("phandleclient", "async_open_clenv()");

    /* Cleanup */

    /* Close connection with client */
    if (socket != -1) {
	shutdown(socket, 2);
	close(socket);
    }

    /* Decrease our connection/refcount counter, and let our ctable thread
     * decide if/when to uncache our node.
     */
    pth_mutex_acquire(&ctable_lock, FALSE, NULL);
    if (clnode->connections > 0)
	clnode->connections--;
    pth_mutex_release(&ctable_lock);

    /* kthxbye :) */
    pth_exit(&ret);

    /* NOTREACHED */
    return (NULL);
}
672
673
674
675/* This consists of a general purpose thread which can serve real
676 * asynchroneous functions via another thread, suitable to use for functions
677 * which can block the whole process when using non-preemptive threads like
678 * the Pth library provides. Pth message ports are used to communicate with
679 * this device in a way that processes waiting for results only block
680 * the requesting thread. The function internally uses unix datagrams to
681 * similarly communicate arguments and obtain back results from a free process
682 * in the asynchroneous processes pool. Another advantage of this technique is
683 * that on SMP systems the various processors can now be taken advantage of.
684 * The caller should of course expect data rather than pointers to be used for
685 * both arguments and return values since pointers are only valid for the
686 * current process.
687 *
688 * Drawbacks exist though; socket send() and recv() calls are internally
689 * required, as well as memory copy operations. Moreover, two new
690 * filedescriptor are required in the main process for each asynchroneous
691 * process in our pool.
692 * It should be used where necessary, like calculating MD5 hashes, resolving
693 * hostnames and evaluating directory tree sizes recursively, etc.
694 *
695 * It would have been possible to use different datagram sizes to transfer
696 * arguments and results to/from the other processes, but because of the way
697 * the Pth AmigaOS-style messages work, a decision was made so that unions are
698 * used by async functions and the whole structure is transfered back and
699 * forth.
700 */
701
/* ARGSUSED */
/* Server side of the async device (see the block comment above): a single
 * Pth thread that shuttles request messages from the Pth message port to
 * the pool of slave processes over their sockets, and replies results back
 * to the requesting threads. Runs forever; exits only on allocation
 * failure at startup or a POLLERR on a slave socket.
 */
static void *
async_thread(void *args)
{
    list_t queue, *freeprocs = &async->freeprocs;
    struct async_idx *idx = NULL;
    struct async_proc *proc;
    struct pollfd *pfds = NULL;
    char *ptr;
    int i, nfds;
    size_t idx_size, pfd_size;

    /* freeprocs is our pool of available async processes, queue is our
     * queue of requests waiting to be processed and replied to.
     * We use the idx array to fast-index process node via an fd which
     * status changed during poll().
     */

    /* Initialize our queue, fd->process index and poll array.
     * First pass finds the highest slave socket fd so that idx[] can be
     * indexed directly by fd.
     */
    i = 0;
    DLIST_INIT(&queue);
    DLIST_FOREACH(freeprocs, proc)
	if (i < proc->sock)
	    i = proc->sock;
    nfds = freeprocs->nodes;
    /* Both arrays live in one calloc(3) block, each long-aligned. */
    idx_size = (size_t)OALIGN_CEIL(sizeof(struct async_idx) * (i + 1), long);
    pfd_size = (size_t)OALIGN_CEIL(sizeof(struct pollfd) * (nfds + 1), long);
    if ((ptr = calloc(idx_size + pfd_size, 1)) != NULL) {
	idx = (struct async_idx *)ptr;
	ptr += idx_size;
	pfds = (struct pollfd *)ptr;
    } else {
	/* Fatal error */
	DPRINTF("async_thread", "calloc(%d)", idx_size + pfd_size);
	pth_exit(NULL);
    }
    /* Second pass fills the fd->process index and the poll array. */
    i = 0;
    DLIST_FOREACH(freeprocs, proc) {
	idx[proc->sock].aproc = proc;
	pfds[i].fd = proc->sock;
	pfds[i].events = POLLIN | POLLERR;
	i++;
    }

    for (;;) {
	char *omsg;

	/* First check for new messages and queue them in our queue list if
	 * any, but only wait for them if all processes are free (we don't
	 * expect any results). Special note: Pth will only notify event for
	 * a port if it received a message while it was empty.
	 * The (omsg - sizeof(pnode_t)) adjustment recovers the enclosing
	 * pool node address from the message pointer.
	 */
	if ((pth_msgport_pending(async->port))) {
	    while ((omsg = (char *)pth_msgport_get(async->port)) != NULL)
		DLIST_APPEND(&queue, (node_t *)(omsg - sizeof(pnode_t)));
	} else if (freeprocs->nodes == async->nprocs) {
	    pth_wait(async->ring);
	    while ((omsg = (char *)pth_msgport_get(async->port)) != NULL)
		DLIST_APPEND(&queue, (node_t *)(omsg - sizeof(pnode_t)));
	}

	/* Verify for any available processes to dispatch requests to, and
	 * if so, dispatch requests from the queue until no more processes
	 * are available or the queue is empty.
	 */
	if (DLIST_NODES(&queue) > 0 && DLIST_NODES(freeprocs) > 0) {
	    struct async_proc *p;
	    struct async_msg *m;
	    size_t len;

	    while ((p = DLIST_TOP(freeprocs)) != NULL &&
		(m = DLIST_TOP(&queue)) != NULL) {
		if (m->func_id < async->nfuncs) {
		    len = async->funcs[m->func_id].msg_len;
		    /* Remember which client env awaits this fd's reply. */
		    idx[p->sock].aclenv = m->aclenv;
		    if ((pth_send(p->sock, m, len, 0)) == len)
			DLIST_UNLINK(freeprocs, (node_t *)p);
		    else
			DPRINTF("async_thread", "send(%d:%d)",
			    p->sock, (int)len);
		} else
		    DPRINTF("async_thread", "Unknown function %u", m->func_id);
		/* Request is dequeued whether dispatched or not. */
		DLIST_UNLINK(&queue, (node_t *)m);
	    }
	}

	/* Wait for results from our async processes via their socket,
	 * but be sure to break when new Pth messages are available.
	 * If new results are obtained, reply back to the caller thread.
	 */
	for (;;) {
	    int selr, i;

	    if ((selr = pth_poll_ev(pfds, nfds, -1, async->ring)) > 0) {
		for (i = 0; selr > 0 && i < nfds; i++) {
		    if (pfds[i].revents & POLLERR) {
			/* If this happens something is really wrong, exit */
			syslog(LOG_NOTICE, "async_thread() - POLLERR(%d)",
			    pfds[i].fd);
			kill(0, SIGTERM);
			for (;;)
			    sleep(30);
		    }
		    if (pfds[i].revents & POLLIN) {
			int fd = pfds[i].fd;
			size_t len;
			struct async_proc *p = idx[fd].aproc;
			struct async_clenv *e = idx[fd].aclenv;

			/* Results, reply message back to client thread and
			 * return this process node in the free pool.
			 */
			if ((len = pth_recv(fd, e->msg, async->msg_len,
			    MSG_WAITALL)) <
			    sizeof(struct async_msg))
			    DPRINTF("async_thread", "recv(%d:%d)",
				fd, (int)async->msg_len);
			pth_msgport_reply(&(e->msg->msg));
			DLIST_APPEND(freeprocs, (node_t *)p);
			selr--;
		    }
		}
	    }
	    if ((pth_event_occurred(async->ring)))
		/* Other requests we must queue immediately */
		break;
	}
    }

    /* NOTREACHED */
    pth_exit(NULL);
    return (NULL);
}
835
836
/* This function should be called at early application startup to setup
 * the asynchroneous pool of processes. async_init_pth() call may then
 * be called later on after pth_init() to initialize the client-side
 * asynchroneous context (required before using async_open_clenv()).
 * This function is assumed to be called once only, and it's effects to
 * only be discarded when the main process exits.
 *
 * `funcs' is a NULL-func-terminated array of user async functions; slot 0
 * of the internal table is always async_resolvehostname(). `procs' slave
 * processes are forked running as uid/gids.
 */
bool
async_init(struct async_func *funcs, int procs, uid_t uid, gid_t *gids,
    int ngids)
{
    unsigned int i, nfuncs;
    size_t funcs_len, msg_len, env_len;
    bool res = FALSE;

    /* Evaluate required space to hold user functions plus our resolve one */
    for (i = 0; funcs[i].func; i++) ;
    nfuncs = i + 1;
    funcs_len = sizeof(struct async_func) * (nfuncs + 1);

    /* Evaluate maximum message size we should expect for arguments */
    msg_len = sizeof(struct async_resolvehostname_msg);
    for (i = 0; i < nfuncs; i++) {
	if (msg_len < funcs[i].msg_len)
	    msg_len = funcs[i].msg_len;
    }

    /* Necessary allocations: the async_env, the function table and the
     * scratch message buffer share a single calloc(3) block, each part
     * long-aligned.
     */
    env_len = (size_t)OALIGN_CEIL(sizeof(struct async_env), long);
    funcs_len = (size_t)OALIGN_CEIL(funcs_len, long);
    msg_len = (size_t)OALIGN_CEIL(msg_len, long);
    if ((async = calloc(env_len + funcs_len + msg_len, 1)) != NULL) {
	if (pool_init(&async->freeprocs_pool, malloc, free,
	    sizeof(struct async_proc), procs, 1, 1)) {
	    register char *ptr = (char *)async;

	    ptr += env_len;
	    async->funcs = (struct async_func *)ptr;
	    ptr += funcs_len;
	    async->msg = (struct async_msg *)ptr;
	    /* NOTE(review): nfuncs already counts the resolve slot
	     * (nfuncs = user funcs + 1), so nfuncs + 1 appears to include
	     * the copied NULL terminator entry as a dispatchable id in
	     * async_thread()'s bound check — looks off by one; confirm
	     * against async_proc_main()'s dispatch.
	     */
	    async->nfuncs = nfuncs + 1;
	    async->nprocs = procs;
	    async->msg_len = msg_len;
	    /* Fill in functions */
	    async->funcs[0].func = async_resolvehostname;
	    async->funcs[0].msg_len = sizeof(struct async_resolvehostname_msg);
	    for (i = 0; i < nfuncs; i++) {
		async->funcs[i + 1].func = funcs[i].func;
		async->funcs[i + 1].msg_len = funcs[i].msg_len;
	    }
	    DLIST_INIT(&async->freeprocs);
	    res = TRUE;
	} else
	    DPRINTF("async_init", "pool_init(freeprocs_pool)");
    } else
	DPRINTF("async_init", "calloc(%d)", env_len + funcs_len + msg_len);

    if (res) {
	/* Start async slave processes */
	if (!spawn_async_procs(uid, gids, ngids, msg_len)) {
	    DPRINTF("async_init", "spawn_async_procs()");
	    res = FALSE;
	}
    }

    if (!res) {
	/* NOTE(review): pool_destroy() is also reached when pool_init()
	 * itself failed — presumably safe on an invalid pool; verify.
	 */
	if (async) {
	    pool_destroy(&async->freeprocs_pool);
	    free(async);
	    async = NULL;
	}
    }

    return (res);
}
912
913
914/* This function is used by an application after pth_init() to initialize
915 * the asynchroneous thread device/server, which will dispatch requests to
916 * the asynchroneous pool of processes in a thread-safe manner (without
917 * blocking the whole process. async_init() should have been called first.
918 */
919bool
920async_init_pth(void)
921{
922 size_t env_len;
923 bool res = FALSE;
924
925 if (async) {
926
927 res = TRUE;
7a56f31f 928 pth_mutex_init(&async->apool_lock);
47071c2b 929 /* Setup our fast-allocation pool for client-side async environments */
cddd431b 930 env_len = (size_t)OALIGN_CEIL(sizeof(struct async_clenv), long);
7a56f31f
MM
931 if (!pool_init(&async->apool, malloc, free, env_len + async->msg_len,
932 16384 / (env_len + async->msg_len), 0, 0)) {
460b991b 933 DPRINTF("async_init_pth", "pool_init(apool)");
47071c2b
MM
934 res = FALSE;
935 }
936
937 if (res) {
938 pth_attr_t attrs;
939
940 res = FALSE;
941 if ((attrs = pth_attr_new()) != NULL) {
942 /* Start async slave thread device */
943 if ((async->port = pth_msgport_create("mms_async_server"))) {
944 if ((async->ring = pth_event(PTH_EVENT_MSG,
945 async->port))) {
946 pth_attr_set(attrs, PTH_ATTR_JOINABLE, FALSE);
947 if ((pth_spawn(attrs, async_thread, NULL)) != NULL)
948 res = TRUE;
949 pth_attr_destroy(attrs);
950 } else
460b991b 951 DPRINTF("async_init_pth", "pth_event()");
47071c2b 952 } else
460b991b 953 DPRINTF("async_init_pth", "pth_msgport_create()");
47071c2b
MM
954 pth_attr_destroy(attrs);
955 }
956 }
957
958 if (!res) {
959 if (async->ring) {
960 pth_event_free(async->ring, PTH_FREE_ALL);
961 async->ring = NULL;
962 }
963 if (async->port) {
964 pth_msgport_destroy(async->port);
965 async->port = NULL;
966 }
7a56f31f 967 pool_destroy(&async->apool);
47071c2b
MM
968 }
969
970 } else
460b991b 971 DPRINTF("async_init_pth", "Call async_init() first");
47071c2b
MM
972
973 return (res);
974}
975
976
/* Used by a client thread to initialize a context suitable to call the
 * async_call() function with. Each thread needs one to use the device.
 * async_init() and async_init_pth() should have been called first.
 * Allocates a node from the shared context pool (under apool_lock), then
 * creates the per-thread Pth reply message port and its event ring.
 * The async_msg buffer lives in the same pool node, right after the
 * async_clenv structure (aligned on a long boundary).
 * Returns the new context, or NULL on any failure (everything acquired
 * so far is released before returning).
 */
struct async_clenv *
async_open_clenv(void)
{
	struct async_clenv *aclenv;

	if (async != NULL && POOL_VALID(&async->apool)) {
		/* Optimized for speed using a pool of pre-allocated nodes */
		pth_mutex_acquire(&async->apool_lock, FALSE, NULL);
		aclenv = (struct async_clenv *)pool_alloc(&async->apool, FALSE);
		pth_mutex_release(&async->apool_lock);
		if (aclenv) {
			/* NOTE(review): "XXX(NULL)" looks like a placeholder
			 * port name left in on purpose (ports are found via
			 * the reply-port pointer, not by name) -- confirm.
			 */
			if ((aclenv->port = pth_msgport_create("XXX(NULL)"))) {
				if ((aclenv->ring = pth_event(PTH_EVENT_MSG, aclenv->port))) {
					register char *ptr = (char *)aclenv;

					/* The message buffer is carved out of the same
					 * pool node, just past the context structure.
					 */
					ptr += (size_t)OALIGN_CEIL(sizeof(struct async_clenv),
					    long);
					aclenv->msg = (struct async_msg *)ptr;
					/* Pth has no cleaner API for this at current time */
					aclenv->msg->msg.m_replyport = aclenv->port;
					aclenv->msg->msg.m_size = async->msg_len;

					return (aclenv);
				}
				pth_msgport_destroy(aclenv->port);
			}
			/* Event creation failed; return the node to the pool */
			pth_mutex_acquire(&async->apool_lock, FALSE, NULL);
			pool_free((pnode_t *)aclenv);
			pth_mutex_release(&async->apool_lock);
		}
	} else
		DPRINTF("async_open_clenv",
		    "Call async_init() and async_init_pth() first");

	return (NULL);
}
1017
1018
/* Destroys an async_clenv which was previously initialized using the
 * async_open_clenv() function.
 * Frees the Pth event ring and message port, then returns the context
 * node to the shared pool under apool_lock.
 * Always returns NULL so callers can write: aclenv = async_close_clenv(aclenv);
 * The caller must ensure no async_call() is still pending on this context.
 */
struct async_clenv *
async_close_clenv(struct async_clenv *aclenv)
{
	pth_event_free(aclenv->ring, PTH_FREE_ALL);
	pth_msgport_destroy(aclenv->port);

	/* The embedded msg buffer belongs to the same pool node; freeing
	 * the node releases it too.
	 */
	pth_mutex_acquire(&async->apool_lock, FALSE, NULL);
	pool_free((pnode_t *)aclenv);
	pth_mutex_release(&async->apool_lock);

	return (NULL);
}
1034
1035
/* This is the function which is used to call async functions handled by
 * async_thread() and our pool of slave processes. It does not check for
 * timeout, the async functions set should be sure to always return within a
 * reasonable delay if they could block indefinitely.
 * aclenv is this thread's context from async_open_clenv(); function is the
 * async function index (e.g. ASYNC_RESOLVEHOSTNAME).  The caller fills in
 * the request arguments in aclenv->msg before calling and reads the results
 * from the same buffer afterwards.  Blocks only the calling thread.
 */
void
async_call(struct async_clenv *aclenv, unsigned int function)
{
	/* Send request */
	aclenv->msg->func_id = function;
	aclenv->msg->aclenv = aclenv;
	pth_msgport_put(async->port, &(aclenv->msg->msg));

	/* Sleep until we get reply back, in a thread-safe manner.
	 * Note: This will permanently block the current thread if the
	 * task does not complete. It would be possible to respect a timeout
	 * here, however this could cause problems. For instance, we may have
	 * sent a resolve request which takes some time because of a network
	 * problem, and the client could disconnect (and this thread end).
	 * The async function reply would then attempt to use a no longer
	 * existing port. (It could look for the port name in the list, but
	 * this would have considerable performance drawbacks. Ideally,
	 * any async operation would be abortable by the thread that initiated
	 * it. This could be tricky to implement in order to work for everything
	 * properly, avoiding resource leaks). So blocking the thread until
	 * we get a reply back seems a good choice here; The async functions
	 * should ensure to eventually return.
	 */
	for (;;) {
		if ((pth_wait(aclenv->ring))) {
			if ((pth_event_occurred(aclenv->ring))) {
				/* We know that message pointer will be the same */
				if ((pth_msgport_get(aclenv->port)))
					break;
			}
		}
	}
}
1074
1075
1076/* This function is called by async_init() and starts up all slave
1077 * asynchroneous processes. It knows when to stop when the blocklist has no
1078 * more entries. It is expected that make_daemon() was used first, so that
1079 * filedescriptors 0-2 be already redirected to /dev/null.
1080 */
1081static bool
6bd3b87c 1082spawn_async_procs(uid_t uid, gid_t *gids, int ngids, size_t msg_len)
47071c2b
MM
1083{
1084 struct async_proc *aproc;
6bd3b87c 1085 int s[2], opt;
47071c2b
MM
1086 bool ret = FALSE;
1087
7a56f31f
MM
1088 if (async == NULL)
1089 return ret;
47071c2b
MM
1090
1091 do {
7a56f31f 1092 if ((aproc = (struct async_proc *)pool_alloc(&async->freeprocs_pool,
47071c2b
MM
1093 TRUE))) {
1094 if ((socketpair(PF_UNIX, SOCK_DGRAM, 0, s)) == 0) {
6bd3b87c
MM
1095
1096 /* Set buffer sizes to make sure that atomic datagram
1097 * operations work as expected for the maximum packet size
1098 */
1099 opt = (int)BALIGN_CEIL(msg_len, 1024);
1100 if (setsockopt(s[0], SOL_SOCKET, SO_SNDBUF, &opt,
1101 sizeof(int)) == -1 ||
1102 setsockopt(s[0], SOL_SOCKET, SO_RCVBUF, &opt,
1103 sizeof(int)) == -1 ||
1104 setsockopt(s[1], SOL_SOCKET, SO_SNDBUF, &opt,
1105 sizeof(int)) == -1 ||
1106 setsockopt(s[1], SOL_SOCKET, SO_RCVBUF, &opt,
1107 sizeof(int)) == -1)
460b991b 1108 DPRINTF("spawn_async_procs", "setsockopt(SO_*BUF)");
6bd3b87c 1109
47071c2b
MM
1110 aproc->sock = s[0];
1111 /* Create new process */
1112 if (!(aproc->pid = fork())) {
1113 close(s[0]);
1114
1115 /* Child */
1116 chdir("/");
1117 umask(0);
1118
1119 /* Finally drop privileges and perform our tasks */
1120 if (mmdropprivs(uid, gids, ngids)) {
1121 close(s[0]);
d2558e27
MM
1122 while ((aproc = DLIST_TOP(&async->freeprocs))
1123 != NULL) {
47071c2b 1124 close(aproc->sock);
d2558e27 1125 DLIST_UNLINK(&async->freeprocs, (node_t *)aproc);
47071c2b
MM
1126 }
1127 async_proc_main(s[1]);
1128 } else
460b991b 1129 DPRINTF("spawn_async_procs", "mmdropprivs()");
47071c2b
MM
1130
1131 } else if (aproc->pid == -1) {
7a56f31f 1132 pool_free((pnode_t *)aproc);
47071c2b 1133 aproc = NULL;
460b991b 1134 DPRINTF("spawn_async_procs", "fork()");
47071c2b
MM
1135 close(s[0]);
1136 close(s[1]);
1137 }
1138 } else
460b991b 1139 DPRINTF("spawn_async_procs", "socketpair()");
47071c2b
MM
1140 }
1141 if (aproc) {
1142 close(s[1]);
d2558e27 1143 DLIST_APPEND(&async->freeprocs, (node_t *)aproc);
47071c2b
MM
1144 }
1145 else ret = TRUE;
1146 } while (aproc);
1147
1148 return (ret);
1149}
1150
1151
1152/* The main loop of each asynchroneous process in the pool */
1153static void
1154async_proc_main(int sock)
1155{
1156 struct pollfd pfd[] = {
1157 {-1, POLLIN | POLLERR, 0}
1158 };
1159
0d8fc7f8 1160#ifndef __GLIBC__
2556ec01 1161 setproctitle("Asynchroneous server process");
0d8fc7f8
MM
1162#endif
1163
47071c2b
MM
1164 pfd[0].fd = sock;
1165
1166 for (;;) {
1167 int selr;
1168 unsigned int func_id;
1169 size_t len;
1170
1171 /* Wait for packets, process them and send result packets */
1172 if ((selr = poll(pfd, 1, -1))) {
1173 if (selr == 1) {
1174 if (pfd[0].revents & POLLERR) {
1175 /* This should not happen. If it does something really
1176 * went wrong; Exit.
1177 */
460b991b 1178 DPRINTF("async_proc_main", "Socket error (%d)", sock);
47071c2b
MM
1179 kill(0, SIGTERM);
1180 for (;;)
1181 sleep(30);
1182 }
1183 if (pfd[0].revents & POLLIN) {
1184 if ((len = recv(sock, async->msg, async->msg_len,
1185 MSG_WAITALL))
1186 >= sizeof(struct async_msg)) {
1187 func_id = async->msg->func_id;
1188 if (func_id < async->nfuncs &&
1189 len == async->funcs[func_id].msg_len) {
1190 async->funcs[func_id].func(async->msg);
1191 if ((send(sock, async->msg, len, 0)) != len)
460b991b
MM
1192 DPRINTF("async_proc_main", "send(%d:%d)",
1193 sock, (int)len);
47071c2b 1194 } else
460b991b
MM
1195 DPRINTF("async_proc_main",
1196 "Invalid function %u on socket %d",
2556ec01 1197 func_id, sock);
47071c2b 1198 } else
460b991b
MM
1199 DPRINTF("async_proc_main", "recv(%d:%d)",
1200 sock, (int)async->msg_len);
47071c2b
MM
1201 }
1202 }
1203 }
1204 }
1205}
1206
1207
/* This function will always be available in the asynchoneous functions set,
 * referenced as ASYNC_RESOLVEHOSTNAME when using async_call().
 * Runs in a slave process: performs the blocking reverse DNS lookup of the
 * address carried in the request message and writes the hostname (or
 * "unknown" on lookup failure) back into the same message, which becomes
 * the reply.
 * NOTE(review): the address length passed to getnameinfo() is hardcoded to
 * sizeof(struct sockaddr_in), so only IPv4 addresses resolve correctly
 * here -- confirm whether IPv6 is meant to be supported.
 */
static void
async_resolvehostname(struct async_msg *msg)
{
	struct async_resolvehostname_msg *amsg = (void *)msg;
	struct sockaddr *saddr = &(amsg->un.args.saddr);

	/* 255 is assumed to match the size of the hostname field in
	 * async_resolvehostname_msg -- TODO confirm against its declaration.
	 */
	if ((getnameinfo(saddr, sizeof(struct sockaddr_in), amsg->un.res.hostname,
	    255, NULL, 0, 0)) != 0) {
		DPRINTF("async_resolvehostname", "getnameinfo()");
		mm_strcpy(amsg->un.res.hostname, "unknown");
	}
}
1223
1224
/* Easy function wrapper for async_resolvehostname(). mmstrfree() should
 * be called to release the supplied result string when no longer needed.
 * This function is thread-safe, as well as asynchroneous, the current
 * process (all threads) will remain free to execute other tasks when
 * the current thread waits for the hostname resolution to complete, as the
 * request is dispatched to one of the asynchroneous processes in our pool.
 * aclenv is the calling thread's async context; saddr is the address to
 * reverse-resolve.  Returns a newly allocated string containing the
 * hostname ("unknown" is what the handler stores on lookup failure), or
 * NULL if the string allocation failed.
 */
char *
resolve_hostname(struct async_clenv *aclenv, struct sockaddr *saddr)
{
	char *tmp;
	struct async_resolvehostname_msg *amsg = (void *)aclenv->msg;

	if ((tmp = mmstralloc(255)) != NULL) {
		/* Marshal the address into the request message, dispatch to a
		 * slave process (blocks this thread only) and copy the reply.
		 */
		mm_memcpy(&(amsg->un.args.saddr), saddr, sizeof(struct sockaddr));
		async_call(aclenv, ASYNC_RESOLVEHOSTNAME);
		/* assumes mmstralloc(255) provides room for 255 chars plus the
		 * NUL terminator -- TODO confirm against mmstring(3) */
		mm_strncpy(tmp, amsg->un.res.hostname, 255);
	} else
		DPRINTF("resolve_hostname", "mmstralloc()");

	return (tmp);
}
904cd663
MM
1247
1248
1249/* These are related to the clnode ipaddress hash table. */
1250
1251
/* Hash callback for the clnode ipaddress hashtable.  Keys are already
 * unique 32-bit values, so the key itself serves as the hash; the length
 * argument is unused (kept for the hashtable callback signature).
 */
/* ARGSUSED */
static u_int32_t
clnode_keyhash(const void *data, size_t len)
{
	const u_int32_t *key = data;

	return (*key);
}
1261
1262
/* Comparison callback for the clnode ipaddress hashtable: a memcmp()
 * replacement that only needs to compare two 32-bit key values.  Returns
 * zero when the keys match, non-zero otherwise (their unsigned difference,
 * as in the original); the length argument is unused.
 */
/* ARGSUSED */
static int
clnode_keycmp(const void *src, const void *dst, size_t len)
{
	u_int32_t a = *((const u_int32_t *)src);
	u_int32_t b = *((const u_int32_t *)dst);

	return (a - b);
}
1271
1272
/* This is the actual ctable hash control and expiration thread.
 * args points to a long giving the maximum sleep period in seconds (also
 * the upper bound for data.soonest).  The thread loops forever: it sleeps
 * until the soonest known node expiration, then walks the ctable under
 * ctable_lock resetting/expunging expired nodes and recomputing the next
 * wakeup via clnode_expire_thread_iterator().
 */
/* ARGSUSED */
static void *
clnode_expire_thread(void *args)
{
	long period = *((long *)args);
	struct clnode_expire_thread_iterator_udata data;

	/* Set initial timeout to maximum allowed */
	data.soonest = period;
	data.cnt = 0;
	for (;;) {
		/* Sleep until it is known that at least one node expired */
		pth_sleep((unsigned int)data.soonest);
		/* Tell our iterator function the current time and the maximum
		 * allowed time to wait to
		 */
		data.current = time(NULL);
		data.soonest = period;
		/* Lock ctable, reset expired nodes, garbage collect, and set
		 * data.soonest to the time of the soonest next expireing node.
		 */
		pth_mutex_acquire(&ctable_lock, FALSE, NULL);
		if (HASHTABLE_NODES(&ctable) > 0)
			hashtable_iterate(&ctable, clnode_expire_thread_iterator, &data);
		pth_mutex_release(&ctable_lock);
	}

	/* NOTREACHED */
	pth_exit(NULL);
	return NULL;
}
1305
1306
/* hashtable_iterate() callback used by clnode_expire_thread().
 * hnod is the current clientlistnode; udata points to the shared iterator
 * state (current time, soonest next expiration, yield counter).
 * Always returns TRUE so that iteration covers the whole table.
 * NOTE(review): expired idle nodes are unlinked while iterating; assumes
 * hashtable_iterate() tolerates removal of the current node -- confirm.
 */
static bool
clnode_expire_thread_iterator(hashnode_t *hnod, void *udata)
{
	clientlistnode *clnode = (clientlistnode *)hnod;
	struct clnode_expire_thread_iterator_udata *data = udata;
	time_t rem;

	/* If the node expired, reset it. For nodes which do not, record the
	 * soonest to expire node. For nodes which expired and for which no
	 * connections exist anymore, expunge them.
	 */
	if ((rem = LR_REMAINS(&clnode->lr, data->current)) == 0) {
		/* This entry expired */
		if (clnode->connections == 0) {
			/* Safe to expunge this node from the cache */
			hashtable_unlink(&ctable, (hashnode_t *)clnode);
			pool_free((pnode_t *)clnode);
		} else {
			/* Reset it */
			LR_EXPIRE(&clnode->lr, data->current);
			rem = LR_REMAINS(&clnode->lr, data->current);
		}
	}
	/* rem is 0 for expunged nodes, so they cannot shrink the timeout */
	if (rem != 0 && data->soonest > rem)
		data->soonest = rem;

	/* If the cache is big, prevent from interfering with other threads */
	if ((data->cnt++) == 64) {
		data->cnt = 0;
		pth_yield(NULL);
	}

	return TRUE;
}