1 /* $Id: mmserver.c,v 1.49 2010/03/27 06:10:34 mmondor Exp $ */
2
3 /*
4 * Copyright (C) 2000-2008, Matthew Mondor
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Matthew Mondor.
18 * 4. The name of Matthew Mondor may not be used to endorse or promote
19 * products derived from this software without specific prior written
20 * permission.
21 * 5. Redistribution of source code may not be released under the terms of
22 * any GNU Public License derivate.
23 *
24 * THIS SOFTWARE IS PROVIDED BY MATTHEW MONDOR ``AS IS'' AND ANY EXPRESS OR
25 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
26 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
27 * IN NO EVENT SHALL MATTHEW MONDOR BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
30 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
31 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
33 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 */
35
36
37
38
39 /* HEADERS */
40
41 #include <sys/types.h>
42 #include <sys/wait.h>
43 #include <sys/stat.h>
44 #include <fcntl.h>
45 #include <stdbool.h>
46 #include <stdint.h>
47 #include <stdlib.h>
48 #include <unistd.h>
49 #include <stdio.h>
50 #include <string.h>
51 #include <errno.h>
52
53 #include <netdb.h>
54 #include <netinet/in.h>
55 #include <netinet/in_systm.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <sys/socket.h>
59 #include <arpa/inet.h>
60 #include <arpa/nameser.h>
61 #include <resolv.h>
62
63 #include <syslog.h>
64
65 #include <pthread.h>
66 #include <signal.h>
67 #include <sys/poll.h>
68 #include <time.h>
69
70 #include <mmstr.h>
71 #include <mmstring.h>
72 #include <mmfd.h>
73 #include <mmlist.h>
74 #include <mmpool.h>
75 #include <mmhash.h>
76 #include <mmserver.h>
77 #include <mmreadcfg.h>
78 #include <mmlimitrate.h>
79 #include <mmlog.h>
80
81 #include <mm_pthread_pool.h>
82 #include <mm_pthread_msg.h>
83 #include <mm_pthread_poll.h>
84 #include <mm_pthread_sleep.h>
85
86
87
88
89 MMCOPYRIGHT("@(#) Copyright (c) 2000-2004\n\
90 \tMatthew Mondor. All rights reserved.\n");
91 MMRCSID("$Id: mmserver.c,v 1.49 2010/03/27 06:10:34 mmondor Exp $");
92
93
94
95
96 /* DEFINITIONS */
97
98
99
100
101 /* STATIC FUNCTIONS PROTOTYPES */
102
103 static void sighandler(int);
104 static struct iface * parse_ifaces(char *, char *);
105 static void phandleclient(pthread_object_t *, void *,
106 void *);
107 static void writepidfile(const char *);
108 static void * async_thread(void *);
109 static struct async_clenv * async_open_clenv(void);
110 static struct async_clenv * async_close_clenv(struct async_clenv *);
111 static bool aclenv_constructor(pnode_t *);
112 static void aclenv_destructor(pnode_t *);
113 static bool spawn_async_procs(uid_t, gid_t *, int,
114 size_t);
115 static void async_proc_main(int);
116 static void async_resolvehostname(struct async_msg *);
117
118 static uint32_t server_sockaddr_hash(const void *, size_t);
119 static int server_sockaddr_cmp(const void *,
120 const void *, size_t);
121 static void * clnode_expire_thread(void *);
122 static bool clnode_expire_thread_iterator(hashnode_t *,
123 void *);
124
125
126
127
128 /* GLOBAL VARIABLES */
129
130 static int (*handleclientfunc)(unsigned long, int,
131 clientlistnode *, struct iface *,
132 struct async_clenv *, void *);
133 static pthread_mutex_t ctable_lock = PTHREAD_MUTEX_INITIALIZER,
134 ppool_lock = PTHREAD_MUTEX_INITIALIZER;
135 static pool_t cpool, ppool;
136 static hashtable_t ctable;
137 static struct async_env *async = NULL;
138 struct server_af_info **server_afi = NULL;
139 static struct server_af_info *i_server_afi = NULL;
140 static bool server_afi_initialized = false;
141
142 /* Useful so that daemons can log standard disconnection status easily */
143 const char *const mms_reasons[MMS_MAX] = {
144 "NORMAL",
145 "INPUT_TIMEOUT",
146 "OUTPUT_TIMEOUT",
147 "INPUT_ERROR",
148 "OUTPUT_ERROR",
149 "RESOURCE_ERROR",
150 "NOT_AVAILABLE",
151 "MANY_ERRORS",
152 "CONNECTION_RATE_EXCEEDED", /* These three used internally */
153 "NUMBER_OF_ADDRESSES_EXCEEDED",
154 "CONNECTIONS_PER_ADDRESS_EXCEEDED",
155 "UNKNOWN"
156 };
157
158
159
160
161 /* MAIN DAEMON PROGRAM */
162
163 /* Before calling this function, the async_init() and async_init_pthread()
164 * functions should have been called. Before any async_*() or tcp_server()
165 * calls, the process is expected to have called make_daemon().
166 */
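/*
 * Illustrative usage sketch (not part of the original source): one possible
 * startup sequence wiring make_daemon(), async_init(), async_init_pthread()
 * and tcp_server() together.  The callback name, uid/gid values, limits and
 * port below are hypothetical, and passing NULL user hooks is an assumption.
 */
#if 0
static int
my_handleclient(unsigned long id, int sock, clientlistnode *clnode,
    struct iface *iface, struct async_clenv *aclenv, void *udata)
{
	/* Serve the client connected on sock; returning ends the session */
	(void) write(sock, "hello\r\n", 7);
	return 0;
}

int
main(void)
{
	/* No user async functions; the array only holds its terminator */
	struct async_func funcs[] = { { .func = NULL } };
	gid_t gids[] = { 1000 };	/* hypothetical gid */

	openlog("exampled", LOG_NDELAY | LOG_PID, LOG_DAEMON);
	if (!make_daemon("/var/run/exampled.pid", NULL))
		exit(-1);
	if (!async_init(funcs, 4, 1000, gids, 1))
		exit(-1);
	/* pthread_init() of the threading library is assumed to occur here */
	if (!async_init_pthread())
		exit(-1);
	/* Listen on 127.0.0.1:7777; tcp_server() never returns */
	tcp_server("Too many connections\r\n", "localhost", "127.0.0.1",
	    1000, gids, 1, 100, 10, 10, 60, 120, 7777, true,
	    my_handleclient, NULL, NULL);
	/* NOTREACHED */
	return 0;
}
#endif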
167 void
168 tcp_server(char *message, char *server_names, char *listen_ips, uid_t uid,
169 gid_t *gids, int ngids, long maxips, long maxperip, long ratemax,
170 long rateper, long timeout, int port, bool resolve,
171 int (*handleclient1)(unsigned long, int, clientlistnode *,
172 struct iface *, struct async_clenv *, void *),
173 void *(*cons_uhook)(void), void (*dest_uhook)(void *))
174 {
175 struct server_sockaddr server, addr;
176 socklen_t addrl;
177 clientlistnode *clnode;
178 int msgsock, ret, msglen, nifaces, nifaces2, i, reason;
179 bool ok;
180 struct pollfd *fds;
181 struct ifacendx *fdsi;
182 struct iface *ifaces, *tif;
183 unsigned long id;
184 pthread_t clnode_thread;
185 pthread_attr_t threadattr, clnode_threadattr;
186 int tcp_proto, clnode_thread_started, err;
187
188 handleclientfunc = handleclient1;
189 id = (unsigned long)time(NULL);
190 msgsock = -1;
191 msglen = strlen(message);
192 fds = NULL;
193 fdsi = NULL;
194 ifaces = NULL;
195 clnode_thread_started = -1;
196
197 /* Pthread related */
198 pthread_attr_init(&threadattr);
199 pthread_attr_setdetachstate(&threadattr, PTHREAD_CREATE_DETACHED);
200 pthread_attr_init(&clnode_threadattr);
201 pthread_attr_setdetachstate(&clnode_threadattr, PTHREAD_CREATE_JOINABLE);
202
203 /* Used by resolve_hostname() */
204 if (!mmstrinit(malloc, free, 16384)) {
205 syslog(LOG_NOTICE, "tcp_server() - mmstrinit()");
206 closelog();
207 exit(-1);
208 }
209
210 if (!server_init_afi()) {
211 syslog(LOG_NOTICE, "tcp_server() - server_init_afi()");
212 closelog();
213 exit(-1);
214 }
215 if ((ifaces = parse_ifaces(server_names, listen_ips)) == NULL) {
216 closelog();
217 mmstrexit();
218 exit(-1);
219 }
220
221 if (!pool_init(&cpool, "clients_pool", malloc, free, NULL, NULL,
222 sizeof(clientlistnode), 65536 / sizeof(clientlistnode),
223 0, 0)) {
224 syslog(LOG_NOTICE, "tcp_server() - pool_init(clients_pool) - %s",
225 strerror(errno));
226 exit(-1);
227 }
228 if (!pool_init(&ppool, "phandles_pool", malloc, free, NULL, NULL,
229 sizeof(phandleinfo), 16384 / sizeof(phandleinfo), 0, 0)) {
230 syslog(LOG_NOTICE, "tcp_server() - pool_init(phandles_pool) - %s",
231 strerror(errno));
232 exit(-1);
233 }
234 if (!hashtable_init(&ctable, "ctable", maxips, 1.0, malloc, free,
235 server_sockaddr_cmp, server_sockaddr_hash, false)) {
236 syslog(LOG_NOTICE, "tcp_server() - hashtable_init(ctable) - %s",
237 strerror(errno));
238 exit(-1);
239 }
240
241 /* Launch threads */
242 if ((clnode_thread_started = pthread_create(&clnode_thread,
243 &clnode_threadattr, clnode_expire_thread, &rateper))
244 != 0) {
245 syslog(LOG_NOTICE,
246 "tcp_server() - pthread_create(clnode_thread) - %s",
247 strerror(errno));
248 exit(-1);
249 }
250 if ((err = pthread_object_init(8, cons_uhook, dest_uhook)) != 0) {
251 syslog(LOG_NOTICE, "tcp_server() - pthread_object_init() - %s",
252 strerror(err));
253 exit(-1);
254 }
255
256 /* Bind the port we should listen to, with all specified addresses */
257 {
258 struct protoent *pent;
259
260 /* Get protocol number for TCP */
261 if ((pent = getprotobyname("TCP")) != NULL)
262 tcp_proto = pent->p_proto;
263 else
264 tcp_proto = -1;
265 }
266 for (ret = nifaces = 0, tif = ifaces; *(tif->hostname); tif++) {
267 if ((tif->sock = socket(*(SERVER_SOCKADDR_FAMILY(&tif->address)),
268 SOCK_STREAM, 0)) > -1) {
269 int opt;
270 struct linger linger;
271
272 opt = 1;
273 if ((setsockopt(tif->sock, SOL_SOCKET, SO_REUSEADDR, &opt,
274 sizeof(int))) == -1)
275 DEBUG_PRINTF("Error on setsockopt(%s:%d, SO_REUSEADDR) - (%s)",
276 tif->address_str, port, tif->hostname);
277 if ((setsockopt(tif->sock, SOL_SOCKET, SO_KEEPALIVE, &opt,
278 sizeof(int))) == -1)
279 DEBUG_PRINTF("Error on setsockopt(%s:%d, SO_KEEPALIVE) - (%s)",
280 tif->address_str, port, tif->hostname);
281
282 linger.l_onoff = 0;
283 linger.l_linger = 0;
284 if ((setsockopt(tif->sock, SOL_SOCKET, SO_LINGER, &linger,
285 sizeof(struct linger))) == -1)
286 DEBUG_PRINTF("Error on setsockopt(%s:%d, SO_LINGER) - (%s)",
287 tif->address_str, port, tif->hostname);
288
289 opt = 65536;
290 if ((setsockopt(tif->sock, SOL_SOCKET, SO_SNDBUF, &opt,
291 sizeof(int))) == -1)
292 DEBUG_PRINTF("Error on setsockopt(%s:%d, SO_SNDBUF) - (%s)",
293 tif->address_str, port, tif->hostname);
294
295 if (tcp_proto != -1) {
296 opt = 1;
297 if ((setsockopt(tif->sock, tcp_proto, TCP_NODELAY, &opt,
298 sizeof(int))) == -1)
299 DEBUG_PRINTF(
300 "Error on setsockopt(%s:%d, TCP_NODELAY) - (%s)",
301 tif->address_str, port, tif->hostname);
302 }
303 (void) memcpy(&server, &tif->address,
304 sizeof(struct server_sockaddr));
305 *(SERVER_SOCKADDR_PORT(&server)) = htons(port);
306 if ((bind(tif->sock, (struct sockaddr *)(void *)&server,
307 SERVER_SOCKADDR_SOCKLEN(&server))) == 0)
308 nifaces++;
309 else {
310 syslog(LOG_NOTICE,
311 "tcp_server() - Error on bind(%s:%d) - (%s)",
312 tif->address_str, port, tif->hostname);
313 close(tif->sock);
314 tif->sock = -1;
315 }
316 }
317 }
318
319 /* Drop privileges and start listening to all bound ports */
320 if (nifaces > 0) {
321 if (mmdropprivs(uid, gids, ngids)) {
322 for (tif = ifaces; *(tif->hostname); tif++) {
323 if (tif->sock != -1) {
324 if ((listen(tif->sock, maxips * maxperip)) != 0) {
325 syslog(LOG_NOTICE, "tcp_server() - listen(%s)",
326 tif->hostname);
327 close(tif->sock);
328 tif->sock = -1;
329 nifaces--;
330 } else
331 syslog(LOG_NOTICE, "Listening on [%s.%d] - (%s)",
332 tif->address_str, port, tif->hostname);
333 }
334 }
335 } else {
336 syslog(LOG_NOTICE,
337 "tcp_server() - Cannot change uid and gid to safe privs");
338 ret = -1;
339 }
340 } else {
341 syslog(LOG_NOTICE,
342 "tcp_server() - no interfaces (left) to listen to");
343 ret = -1;
344 }
345
346 if (ret == 0) {
347 size_t idx_size, pfd_size;
348
349 /* Setup our poll() array, with corresponding index array */
350 pfd_size = (size_t)OALIGN_CEIL(sizeof(struct pollfd) *
351 (nifaces + 1), long);
352 idx_size = (size_t)OALIGN_CEIL(sizeof(struct ifacendx) *
353 (nifaces + 1), long);
354 if ((fds = malloc(pfd_size + idx_size)) != NULL) {
355 for (fdsi = (struct ifacendx *)(((char *)fds) + pfd_size),
356 i = 0, tif = ifaces;
357 *(tif->hostname) != '\0'; tif++) {
358 if (tif->sock != -1) {
359 fdsi[i].iface = tif;
360 fds[i].fd = tif->sock;
361 fds[i].events = POLLIN;
362 fds[i].revents = 0;
363 i++;
364 }
365 }
366 } else {
367 syslog(LOG_NOTICE, "tcp_server() - Out of memory");
368 ret = -1;
369 }
370 } else
371 ret = -1;
372
373 if (ret != 0) {
374 free(ifaces);
375 mmstrexit();
376 if (clnode_thread_started == 0) {
377 pthread_cancel(clnode_thread);
378 pthread_join(clnode_thread, NULL);
379 }
380 if (HASHTABLE_VALID(&ctable))
381 hashtable_destroy(&ctable, false);
382 if (POOL_VALID(&ppool))
383 pool_destroy(&ppool);
384 if (POOL_VALID(&cpool))
385 pool_destroy(&cpool);
386 exit(ret);
387 }
388
389 /* Start answering and dispatching connections */
390 syslog(LOG_NOTICE, "Started for uid: %d", uid);
391 for (;;) {
392 if ((nifaces2 = poll(fds, nifaces, -1)) > 0) {
393 time_t curtime = time(NULL);
394
395 /* Loop over the interfaces only once, and only for as long as nifaces2
396 * events remain; use our fast index to reference the interface structure.
397 */
398 for (i = 0; i < nifaces && nifaces2; i++) {
399 if (fds[i].revents & POLLIN) {
400 nifaces2--;
401 addr.ss_family = fdsi[i].iface->address.ss_family;
402 addrl = SERVER_SOCKADDR_SOCKLEN(&addr);
403 if ((msgsock = accept(fds[i].fd, SERVER_SOCKADDR(&addr),
404 &addrl)) != -1) {
405 /* Make sure that we respect connection and rate
406 * limits.
407 */
408 ok = false;
409 reason = MMS_NORMAL;
410 pthread_mutex_lock(&ctable_lock);
411 if ((clnode = (clientlistnode *)hashtable_lookup(
412 &ctable, &addr,
413 sizeof(struct server_sockaddr)))
414 == NULL) {
415 /* Create new node */
416 if (HASHTABLE_NODES(&ctable) < maxips) {
417 if ((clnode = (clientlistnode *)pool_alloc(
418 &cpool, false)) != NULL) {
419 clnode->hostname = NULL;
420 clnode->connections = 0;
421 if (ratemax != 0)
422 LR_INIT(&clnode->lr, ratemax,
423 (time_t)rateper, curtime);
424 clnode->timeout = (timeout * 1000);
425 (void) memcpy(&clnode->client, &addr,
426 sizeof(struct server_sockaddr));
427 clnode->resolve = resolve;
428 hashtable_link(&ctable,
429 (hashnode_t *)clnode,
430 &clnode->client,
431 sizeof(struct server_sockaddr),
432 false);
433 } else {
434 syslog(LOG_NOTICE,
435 "tcp_server() - Out of memory");
436 reason = MMS_RESOURCE_ERROR;
437 }
438 } else
439 reason = MMS_ADDRESSES_EXCEEDED;
440 }
441 if (clnode != NULL) {
442 /* Either we found an existing node or we created
443 * it successfully
444 */
445 if (clnode->connections < maxperip) {
446 if (ratemax != 0) {
447 if (!(ok = lr_allow(&clnode->lr, 1, 0,
448 false)))
449 reason = MMS_RATE_EXCEEDED;
450 } else
451 ok = true;
452 } else
453 reason = MMS_CONPERADDRESS_EXCEEDED;
454 }
455 pthread_mutex_unlock(&ctable_lock);
456
457 if (ok) {
458 phandleinfo *phi;
459
460 /* Dispatch client to a thread */
461 pthread_mutex_lock(&ppool_lock);
462 phi = (phandleinfo *)pool_alloc(&ppool, false);
463 pthread_mutex_unlock(&ppool_lock);
464 if (phi != NULL) {
465 id++;
466 phi->id = id;
467 phi->socket = msgsock;
468 phi->clnode = clnode;
469 /* Indexed reference */
470 phi->iface = fdsi[i].iface;
471 if (pthread_object_call(NULL, phandleclient,
472 phi) == 0) {
473 pthread_mutex_lock(&ctable_lock);
474 clnode->connections++;
475 pthread_mutex_unlock(&ctable_lock);
476 } else {
477 if (phi != NULL) {
478 pthread_mutex_lock(&ppool_lock);
479 pool_free((pnode_t *)phi);
480 pthread_mutex_unlock(&ppool_lock);
481 }
482 syslog(LOG_NOTICE, "tcp_server() - "
483 "Error launching thread");
484 }
485 } else {
486 syslog(LOG_NOTICE,
487 "tcp_server() - Out of memory");
488 reason = MMS_RESOURCE_ERROR;
489 }
490 } else {
491 char ipaddr[64];
492
493 /* Close down connection with client
494 * immediately, sending message.
495 */
496 (void) write(msgsock, message, msglen);
497 shutdown(msgsock, 2);
498 close(msgsock);
499 strcpy(ipaddr, "0.0.0.0");
500 inet_ntop(*(SERVER_SOCKADDR_FAMILY(&addr)),
501 SERVER_SOCKADDR_ADDRESS(&addr),
502 ipaddr, 63);
503 if (reason != MMS_NORMAL)
504 syslog(LOG_NOTICE, "tcp_server() - [%s] - %s",
505 ipaddr, MMS_RSTRING(reason));
506 }
507 } else
508 syslog(LOG_NOTICE, "tcp_server() - accept() - [%s]",
509 strerror(errno));
510 }
511 }
512 }
513 }
514
515 free(ifaces);
516 mmstrexit();
517 pthread_cancel(clnode_thread);
518 pthread_join(clnode_thread, NULL);
519 hashtable_destroy(&ctable, false);
520 pool_destroy(&ppool);
521 pool_destroy(&cpool);
522
523 exit(ret);
524 }
525
526
527 /* Writes our process ID number to the specified file. To be called before
528 * chroot(2) or dropping privileges.
529 */
530 static void
531 writepidfile(const char *file)
532 {
533 char str[16];
534 int fd;
535
536 if ((fd = open(file, O_CREAT | O_TRUNC | O_WRONLY, 0600)) != -1) {
537 snprintf(str, 15, "%d\n", getpid());
538 write(fd, str, strlen(str));
539 close(fd);
540 } else
541 syslog(LOG_NOTICE, "writepidfile() - open(%s)", file);
542 }
543
544
545 /* Uses fork() to become a daemon, and detaches from the current tty.
546 * Returns whether it was successful.
547 */
548 bool
549 make_daemon(const char *pidfile, const char *jail)
550 {
551 int pid;
552
553 #ifdef NODETACH
554 pid = getpid();
555 #else
556 if ((pid = fork()) != -1)
557 #endif
558 {
559
560 #ifndef NODETACH
561 if (pid > 0) {
562 /* Parent */
563 exit(0);
564 } else
565 #endif
566 {
567 #ifndef NODETACH
568 int fd;
569 #endif
570 struct sigaction act;
571
572 #ifndef NODETACH
573 /* Child */
574 setsid();
575 chdir("/");
576 if ((fd = open("/dev/null", O_RDWR)) != -1) {
577 dup2(fd, 0);
578 dup2(fd, 1);
579 dup2(fd, 2);
580 if (fd > 2)
581 close(fd);
582 }
583 #endif
584 writepidfile(pidfile);
585
586 if (jail && *jail) {
587 if ((chroot(jail)) == -1) {
588 syslog(LOG_NOTICE,
589 "make_daemon() - Could not chroot(2) to '%s'",
590 jail);
591 exit(-1);
592 }
593 chdir("/");
594 }
595
596 /* Setup our break handler and ignore wanted signals */
597 act.sa_handler = sighandler;
598 act.sa_flags = SA_NOCLDWAIT;
599 sigemptyset(&act.sa_mask);
600 sigaction(SIGTERM, &act, NULL);
601 sigaction(SIGCHLD, &act, NULL);
602 #ifndef NODETACH
603 sigaction(SIGSEGV, &act, NULL);
604 #endif
605 signal(SIGTTOU, SIG_IGN);
606 signal(SIGTTIN, SIG_IGN);
607 signal(SIGTSTP, SIG_IGN);
608 signal(SIGPIPE, SIG_IGN);
609 signal(SIGUSR2, SIG_IGN);
610
611 umask(0);
612
613 #ifndef __GLIBC__
614 setproctitle("Main threaded process");
615 #endif
616
617 return (true);
618 }
619 }
620
621 return (false);
622 }
623
624
625 static void
626 sighandler(int sig)
627 {
628 switch (sig) {
629 #ifndef NODETACH
630 case SIGSEGV:
631 syslog(LOG_NOTICE, "Exiting (SIGSEGV)");
632 kill(0, SIGTERM);
633 exit(0);
634 break;
635 case SIGABRT:
636 syslog(LOG_NOTICE, "Exiting (SIGABRT)");
637 signal(SIGTERM, SIG_IGN);
638 kill(0, SIGTERM);
639 exit(0);
640 break;
641 #endif
642 case SIGTERM:
643 syslog(LOG_NOTICE, "Exiting (SIGTERM)");
644 signal(SIGTERM, SIG_IGN);
645 kill(0, SIGTERM);
646 exit(0);
647 break;
648 case SIGCHLD:
649 {
650 int s = 0;
651
652 while ((wait3(&s, WNOHANG, NULL)) > 0) ;
653 }
654 }
655 }
656
657
658 bool
659 server_init_afi(void)
660 {
661 int i, limit;
662 struct sockaddr_in sin;
663 struct sockaddr_in6 sin6;
664
665 if (server_afi_initialized)
666 return true;
667
668 limit = 0;
669 if (limit < AF_INET)
670 limit = AF_INET;
671 if (limit < AF_INET6)
672 limit = AF_INET6;
673 limit++;
674
675 if ((i_server_afi = malloc(sizeof(struct server_af_info) * 2)) == NULL ||
676 (server_afi = malloc(sizeof(struct server_af_info *) * limit))
677 == NULL)
678 goto err;
679
680 /* AF_INET */
681 i_server_afi[0].sock_length = sizeof(struct sockaddr_in);
682 i_server_afi[0].addr_length = sizeof(struct in_addr);
683 i_server_afi[0].port_offset = (long)&sin.sin_port - (long)&sin;
684 i_server_afi[0].addr_offset = (long)&sin.sin_addr - (long)&sin;
685 /* AF_INET6 */
686 i_server_afi[1].sock_length = sizeof(struct sockaddr_in6);
687 i_server_afi[1].addr_length = sizeof(struct in6_addr);
688 i_server_afi[1].port_offset = (long)&sin6.sin6_port - (long)&sin6;
689 i_server_afi[1].addr_offset = (long)&sin6.sin6_addr - (long)&sin6;
690
691 for (i = 0; i < limit; i++)
692 server_afi[i] = NULL;
693 server_afi[AF_INET] = &i_server_afi[0];
694 server_afi[AF_INET6] = &i_server_afi[1];
695
696 server_afi_initialized = true;
697 return true;
698
699 err:
700 if (i_server_afi != NULL)
701 free(i_server_afi);
702 if (server_afi != NULL)
703 free(server_afi);
704 return false;
705 }
706
707 static struct iface *
708 parse_ifaces(char *hostnames, char *addresses)
709 {
710 struct iface *ifaces;
711 char *hosts[65], *ips[65];
712 struct server_sockaddr saddr;
713 int n, i;
714
715 ifaces = NULL;
716
717 if (hostnames != NULL && addresses != NULL && *hostnames != '\0' &&
718 *addresses != '\0') {
719 if ((n = mm_straspl(hosts, hostnames, 64)) ==
720 mm_straspl(ips, addresses, 64)) {
721 if ((ifaces = malloc(sizeof(struct iface) * (n + 1))) != NULL) {
722 /* Setup our array */
723 for (i = 0; i < n; i++) {
724 mm_memclr(&saddr, sizeof(struct server_sockaddr));
725 /*
726 * Determine family type. If ':' is found, assume
727 * IPv6, assume IPv4 otherwise.
728 */
729 *(SERVER_SOCKADDR_FAMILY(&saddr)) =
730 (strchr(ips[i], ':') == NULL ? AF_INET : AF_INET6);
731 /* Parse user supplied address */
732 if ((inet_pton(*(SERVER_SOCKADDR_FAMILY(&saddr)), ips[i],
733 SERVER_SOCKADDR_ADDRESS(&saddr))) == 1) {
734 strncpy(ifaces[i].hostname, hosts[i], 255);
735 strncpy(ifaces[i].address_str, ips[i], 63);
736 ifaces[i].sock = -1;
737 ifaces[i].address = saddr;
738 *(ifaces[i + 1].hostname) = '\0';
739 } else {
740 syslog(LOG_NOTICE,
741 "parse_ifaces() - Invalid address [%s]",
742 ips[i]);
743 free(ifaces);
744 ifaces = NULL;
745 break;
746 }
747 }
748 }
749 } else
750 syslog(LOG_NOTICE,
751 "parse_ifaces() - Number of hostnames and addresses mismatch");
752 }
753
754 return (ifaces);
755 }
756
757
758 /* SERVER (THREAD) */
759
760 /* This is started by pthread_create(); it calls the user handler function,
761 * which only has to care about serving the client.
762 */
763 static void
764 phandleclient(pthread_object_t *obj, void *args, void *udata)
765 {
766 phandleinfo *phi;
767 int socket;
768 clientlistnode *clnode;
769 char *tmp;
770 struct iface *iface;
771 struct async_clenv *aclenv;
772 unsigned long id;
773
774 /* Get and discard our passed parameters */
775 phi = (phandleinfo *)args;
776 id = phi->id;
777 socket = phi->socket;
778 clnode = phi->clnode;
779 iface = phi->iface;
780 pthread_mutex_lock(&ppool_lock);
781 pool_free((pnode_t *)phi);
782 pthread_mutex_unlock(&ppool_lock);
783
784 if ((aclenv = async_open_clenv())) {
785 if (clnode->resolve) {
786 /* We want to resolve the hostname of the client's IP address, but not
787 * if it was already done for another client currently logged on
788 * from that same address.
789 */
790 tmp = NULL;
791 /* We need to determine if hostname should be resolved. */
792 if (clnode->hostname == NULL) {
793 /* It seems so, so let's do it without holding the mutex */
794 tmp = resolve_hostname(aclenv, &(clnode->client));
795 }
796 if (tmp) {
797 /* Lock the mutex for a very short moment */
798 pthread_mutex_lock(&ctable_lock);
799 /* Verify again for NULL to avoid a potential memory leak */
800 if (clnode->hostname == NULL)
801 clnode->hostname = tmp;
802 else
803 tmp = mmstrfree(tmp);
804 pthread_mutex_unlock(&ctable_lock);
805 }
806 }
807
808 /* Handle the client */
809 (void) handleclientfunc(id, socket, clnode, iface, aclenv, udata);
810
811 async_close_clenv(aclenv);
812 } else
813 syslog(LOG_NOTICE, "phandleclient() - async_open_clenv()");
814
815 /* Cleanup */
816
817 /* Close connection with client */
818 if (socket != -1) {
819 shutdown(socket, 2);
820 close(socket);
821 }
822
823 /* Decrease our connection/refcount counter, and let our ctable thread
824 * decide if/when to uncache our node.
825 */
826 pthread_mutex_lock(&ctable_lock);
827 if (clnode->connections > 0)
828 clnode->connections--;
829 pthread_mutex_unlock(&ctable_lock);
830
831 /* kthxbye :) */
832 }
833
834
835
836 /* This consists of a general-purpose thread which can serve truly
837 * asynchronous functions via another thread, suitable for functions
838 * which could block the whole process when using non-preemptive threads
839 * such as those the mm_pthread_util library provides. Efficient
840 * inter-thread message ports are used to communicate with this device so
841 * that waiting for results only blocks the requesting thread. Internally,
842 * unix datagrams are used to pass arguments to, and obtain results from,
843 * a free process in the asynchronous process pool. This system can be used
844 * for parallel processing with a non-preemptive threading library, or for
845 * tasks which are best served by another process (e.g. stack issues, etc).
846 * The caller should of course expect data rather than pointers to be used
847 * for both arguments and return values, since pointers are only valid for
848 * the current process.
849 *
850 * Drawbacks exist, though: socket send() and recv() calls are internally
851 * required, as well as memory copy operations. Moreover, two new
852 * file descriptors are required in the main process for each asynchronous
853 * process in our pool.
854 * It can be used where necessary, such as for calculating MD5 hashes,
855 * resolving hostnames, evaluating directory tree sizes recursively, etc.
856 *
857 * It would have been possible to use different datagram sizes to transfer
858 * arguments and results to/from the other processes, but because of the way
859 * AmigaOS-style messages work, a decision was made to have async functions
860 * use unions, with the whole structure being transferred back and
861 * forth.
862 */
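/*
 * Illustrative sketch (not part of the original source) of how a daemon
 * might define and register its own asynchronous function.  The message
 * layout is assumed to mirror struct async_resolvehostname_msg (a leading
 * struct async_msg followed by an argument/result union), and the names
 * async_example_msg, async_example(), example_funcs and EXAMPLE_FUNC_ID are
 * hypothetical.  Slot 0 of the function table holds the built-in hostname
 * resolver, so the first user-supplied function appears to get id 1.
 */
#if 0
struct async_example_msg {
	struct async_msg	msg;		/* assumed to come first */
	union {
		struct {
			char	path[256];	/* argument: file path */
		} args;
		struct {
			long	size;		/* result: file size or -1 */
		} res;
	} un;
};

/* Runs inside one of the slave processes and may block freely */
static void
async_example(struct async_msg *msg)
{
	struct async_example_msg *amsg = (void *)msg;
	struct stat st;

	amsg->un.res.size = (stat(amsg->un.args.path, &st) == 0) ?
	    (long)st.st_size : -1;
}

/* Registration table passed to async_init(); the NULL entry terminates it */
static struct async_func example_funcs[] = {
	{ .func = async_example,
	  .msg_len = sizeof(struct async_example_msg) },
	{ .func = NULL }
};
#define EXAMPLE_FUNC_ID	1

/* Called from a client thread which owns an async_clenv */
static long
example_file_size(struct async_clenv *aclenv, const char *path)
{
	struct async_example_msg *amsg = (void *)aclenv->msg;

	(void) strncpy(amsg->un.args.path, path,
	    sizeof(amsg->un.args.path) - 1);
	amsg->un.args.path[sizeof(amsg->un.args.path) - 1] = '\0';
	async_call(aclenv, EXAMPLE_FUNC_ID);
	return amsg->un.res.size;
}
#endif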
863
864 /* ARGSUSED */
865 static void *
866 async_thread(void *args)
867 {
868 list_t queue, *freeprocs = &async->freeprocs;
869 struct async_idx *idx = NULL;
870 struct async_proc *proc;
871 struct pollfd *pfds = NULL;
872 char *ptr;
873 int i, nfds;
874 size_t idx_size, pfd_size;
875
876 /* freeprocs is our pool of available async processes; queue is our
877 * queue of requests waiting to be processed and replied to.
878 * We use the idx array to fast-index a process node via an fd whose
879 * status changed during poll().
880 */
881
882 /* Initialize our queue, fd->process index and poll array */
883 i = 0;
884 DLIST_INIT(&queue);
885 DLIST_FOREACH(freeprocs, proc)
886 if (i < proc->sock)
887 i = proc->sock;
888 nfds = freeprocs->nodes;
889 idx_size = (size_t)OALIGN_CEIL(sizeof(struct async_idx) *
890 (i + 1), long);
891 pfd_size = (size_t)OALIGN_CEIL(sizeof(struct pollfd) *
892 (nfds + 1), long);
893 if ((ptr = malloc(idx_size + pfd_size)) != NULL ) {
894 idx = (struct async_idx *)ptr;
895 ptr += idx_size;
896 pfds = (struct pollfd *)ptr;
897 } else {
898 /* Fatal error */
899 syslog(LOG_NOTICE, "async_thread() - malloc(%llu)",
900 (unsigned long long)idx_size + pfd_size);
901 pthread_exit(NULL);
902 }
903 i = 0;
904 DLIST_FOREACH(freeprocs, proc) {
905 idx[proc->sock].aproc = proc;
906 pfds[i].fd = proc->sock;
907 pfds[i].events = POLLIN;
908 pfds[i].revents = 0;
909 i++;
910 }
911
912 for (;;) {
913 char *omsg;
914
915 /* First check for new messages and queue them in our queue list if
916 * any, but only wait for them if all processes are free (we don't
917 * expect any results).
918 */
919 while ((omsg = (char *)pthread_msg_get(&async->port)) != NULL)
920 DLIST_APPEND(&queue, (node_t *)(omsg - sizeof(pnode_t)));
921 if (DLIST_NODES(freeprocs) == async->nprocs &&
922 DLIST_NODES(&queue) == 0) {
923 pthread_ring_wait(&async->ring, NULL);
924 continue;
925 }
926
927 /* Check for any available processes to dispatch requests to, and
928 * if so, dispatch requests from the queue until no more processes
929 * are available or the queue is empty.
930 */
931 if (DLIST_NODES(&queue) > 0 && DLIST_NODES(freeprocs) > 0) {
932 struct async_proc *p;
933 struct async_msg *m;
934 size_t len;
935
936 while ((p = DLIST_TOP(freeprocs)) != NULL &&
937 (m = DLIST_TOP(&queue)) != NULL) {
938 if (DEBUG_TRUE(m->func_id < async->nfuncs)) {
939 len = async->funcs[m->func_id].msg_len;
940 idx[p->sock].aclenv = m->aclenv;
941 if ((send(p->sock, m, len, 0)) == len)
942 DLIST_UNLINK(freeprocs, (node_t *)p);
943 else
944 syslog(LOG_NOTICE, "async_thread() - send(%d:%d)",
945 p->sock, (int)len);
946 } else
947 DEBUG_PRINTF("Unknown function %u", m->func_id);
948 DLIST_UNLINK(&queue, (node_t *)m);
949 }
950 }
951
952 /* Wait for results from our async processes via their socket,
953 * but be sure to break when new inter-thread messages are available.
954 * If new results are obtained, reply back to the caller thread.
955 * If all processes are free, go back to pthread_ring_wait()
956 * instead of pthread_poll_ring().
957 */
958 while (DLIST_NODES(freeprocs) < async->nprocs) {
959 int selr, i;
960
961 selr = pthread_poll_ring(pfds, nfds, -1, &async->ring);
962 if (selr == -1 && errno == ECANCELED)
963 break;
964 for (i = 0; selr > 0 && i < nfds; i++) {
965 if (pfds[i].revents & POLLERR) {
966 /* If this happens, something is really wrong; exit */
967 syslog(LOG_NOTICE, "async_thread() - POLLERR(%d)",
968 pfds[i].fd);
969 kill(0, SIGTERM);
970 for (;;)
971 pthread_sleep(30);
972 }
973 if (pfds[i].revents & POLLIN) {
974 int fd = pfds[i].fd;
975 size_t len;
976 struct async_proc *p = idx[fd].aproc;
977 struct async_clenv *e = idx[fd].aclenv;
978
979 /* Results; reply the message back to the client thread and
980 * return this process node to the free pool.
981 */
982 if ((len = recv(fd, e->msg, async->msg_len,
983 MSG_WAITALL)) < sizeof(struct async_msg))
984 syslog(LOG_NOTICE, "async_thread() - recv(%d:%d)",
985 fd, (int)async->msg_len);
986 pthread_msg_reply(&(e->msg->msg));
987 DLIST_APPEND(freeprocs, (node_t *)p);
988 selr--;
989 }
990 }
991 }
992 }
993
994 /* NOTREACHED */
995 pthread_exit(NULL);
996 return (NULL);
997 }
998
999
1000 /* This function should be called at early application startup to set up
1001 * the asynchronous pool of processes. async_init_pthread() may then
1002 * be called later on, after pthread_init(), to initialize the client-side
1003 * asynchronous context (required before using async_open_clenv()).
1004 * This function is assumed to be called once only, and its effects are
1005 * only discarded when the main process exits.
1006 */
1007 bool
1008 async_init(struct async_func *funcs, int procs, uid_t uid, gid_t *gids,
1009 int ngids)
1010 {
1011 unsigned int i, nfuncs;
1012 size_t funcs_len, msg_len, env_len;
1013 bool res = false;
1014
1015 /* Evaluate required space to hold user functions plus our resolve one */
1016 for (i = 0; funcs[i].func; i++) ;
1017 nfuncs = i + 1;
1018 funcs_len = sizeof(struct async_func) * (nfuncs + 1);
1019
1020 /* Evaluate maximum message size we should expect for arguments */
1021 msg_len = sizeof(struct async_resolvehostname_msg);
1022 for (i = 0; i < nfuncs; i++) {
1023 if (msg_len < funcs[i].msg_len)
1024 msg_len = funcs[i].msg_len;
1025 }
1026
1027 /* Necessary allocations */
1028 env_len = (size_t)OALIGN_CEIL(sizeof(struct async_env), long);
1029 funcs_len = (size_t)OALIGN_CEIL(funcs_len, long);
1030 msg_len = (size_t)OALIGN_CEIL(msg_len, long);
1031 if ((async = calloc(env_len + funcs_len + msg_len, 1)) != NULL) {
1032 if (pool_init(&async->freeprocs_pool, "freeprocs_pool",
1033 malloc, free, NULL, NULL, sizeof(struct async_proc),
1034 procs, 1, 1)) {
1035 register char *ptr = (char *)async;
1036
1037 ptr += env_len;
1038 async->funcs = (struct async_func *)ptr;
1039 ptr += funcs_len;
1040 async->msg = (struct async_msg *)ptr;
1041 async->nfuncs = nfuncs + 1;
1042 async->nprocs = procs;
1043 async->msg_len = msg_len;
1044 /* Fill in functions */
1045 async->funcs[0].func = async_resolvehostname;
1046 async->funcs[0].msg_len = sizeof(struct async_resolvehostname_msg);
1047 for (i = 0; i < nfuncs; i++) {
1048 async->funcs[i + 1].func = funcs[i].func;
1049 async->funcs[i + 1].msg_len = funcs[i].msg_len;
1050 }
1051 DLIST_INIT(&async->freeprocs);
1052 res = true;
1053 } else
1054 syslog(LOG_NOTICE, "async_init() - pool_init(freeprocs_pool)");
1055 } else
1056 syslog(LOG_NOTICE, "async_init() - calloc(%llu)",
1057 (unsigned long long)env_len + funcs_len + msg_len);
1058
1059 if (res) {
1060 /* Start async slave processes */
1061 if (!spawn_async_procs(uid, gids, ngids, msg_len)) {
1062 syslog(LOG_NOTICE, "async_init() - spawn_async_procs()");
1063 res = false;
1064 }
1065 }
1066
1067 if (!res) {
1068 if (async) {
1069 pool_destroy(&async->freeprocs_pool);
1070 free(async);
1071 async = NULL;
1072 }
1073 }
1074
1075 return (res);
1076 }
1077
1078
1079 /* This function is used by an application after pthread_init() to initialize
1080 * the asynchronous thread device/server, which will dispatch requests to
1081 * the asynchronous pool of processes in a thread-safe manner (without
1082 * blocking the whole process). async_init() should have been called first.
1083 */
1084 bool
1085 async_init_pthread(void)
1086 {
1087 size_t env_len;
1088 bool res = false;
1089
1090 if (DEBUG_TRUE(async != NULL)) {
1091
1092 pthread_poll_init();
1093
1094 res = true;
1095 pthread_mutex_init(&async->apool_lock, NULL);
1096 /* Setup our fast-allocation pool for client-side async environments */
1097 env_len = (size_t)OALIGN_CEIL(sizeof(struct async_clenv), long);
1098 if (!pool_init(&async->apool, "asyncenv_pool", malloc, free,
1099 aclenv_constructor, aclenv_destructor,
1100 env_len + async->msg_len,
1101 16384 / (env_len + async->msg_len), 0, 0)) {
1102 syslog(LOG_NOTICE, "async_init_pthread() - pool_init(apool)");
1103 res = false;
1104 }
1105
1106 if (res) {
1107 pthread_attr_t attrs;
1108 pthread_t thread;
1109
1110 res = false;
1111 /* Start async slave thread device */
1112 if (pthread_port_init(&async->port) == 0) {
1113 if (pthread_ring_init(&async->ring) == 0) {
1114 pthread_port_set_ring(&async->port, &async->ring);
1115 pthread_attr_init(&attrs);
1116 pthread_attr_setdetachstate(&attrs,
1117 PTHREAD_CREATE_DETACHED);
1118 if ((pthread_create(&thread, &attrs, async_thread, NULL))
1119 == 0)
1120 res = true;
1121 } else
1122 syslog(LOG_NOTICE, "async_init_pth() - pth_event()");
1123 } else
1124 syslog(LOG_NOTICE,
1125 "async_init_pth() - pth_msgport_create()");
1126 pthread_attr_destroy(&attrs);
1127 }
1128
1129 if (!res) {
1130 if (pthread_port_valid(&async->port))
1131 pthread_port_destroy(&async->port);
1132 if (pthread_ring_valid(&async->ring))
1133 pthread_ring_destroy(&async->ring);
1134 pool_destroy(&async->apool);
1135 }
1136
1137 } else
1138 DEBUG_PRINTF("Call async_init() first");
1139
1140 return (res);
1141 }
1142
1143
1144 /* Used by a client thread to initialize a context suitable to call the
1145 * async_call() function with. Each thread needs one to use the device.
1146 * async_init() and async_init_pthread() should have been called first.
1147 */
1148 static struct async_clenv *
1149 async_open_clenv(void)
1150 {
1151 struct async_clenv *aclenv = NULL;
1152
1153 if (DEBUG_TRUE(async != NULL && POOL_VALID(&async->apool))) {
1154 /* Optimized for speed using a pool of pre-allocated nodes */
1155 pthread_mutex_lock(&async->apool_lock);
1156 aclenv = (struct async_clenv *)pool_alloc(&async->apool, false);
1157 pthread_mutex_unlock(&async->apool_lock);
1158 } else
1159 DEBUG_PRINTF("Call async_init() and async_init_pth() first");
1160
1161 return aclenv;
1162 }
1163
1164 /* Destroys an async_clenv which was previously initialized using the
1165 * async_open_clenv() function.
1166 */
1167 static struct async_clenv *
1168 async_close_clenv(struct async_clenv *aclenv)
1169 {
1170 pthread_mutex_lock(&async->apool_lock);
1171 pool_free((pnode_t *)aclenv);
1172 pthread_mutex_unlock(&async->apool_lock);
1173
1174 return (NULL);
1175 }
1176
1177 static bool
1178 aclenv_constructor(pnode_t *pnode)
1179 {
1180 struct async_clenv *aclenv = (struct async_clenv *)pnode;
1181
1182 if (pthread_port_init(&aclenv->port) == 0) {
1183 if (pthread_ring_init(&aclenv->ring) == 0) {
1184 register char *ptr = (char *)aclenv;
1185
1186 ptr += (size_t)OALIGN_CEIL(sizeof(struct async_clenv), long);
1187 aclenv->msg = (struct async_msg *)ptr;
1188 pthread_msg_init(&aclenv->msg->msg, &aclenv->port);
1189 pthread_port_set_ring(&aclenv->port, &aclenv->ring);
1190
1191 return true;
1192 }
1193 pthread_port_destroy(&aclenv->port);
1194 }
1195
1196 return false;
1197 }
1198
1199 static void aclenv_destructor(pnode_t *pnode)
1200 {
1201 struct async_clenv *aclenv = (struct async_clenv *)pnode;
1202
1203 pthread_msg_destroy(&aclenv->msg->msg);
1204 pthread_port_destroy(&aclenv->port);
1205 pthread_ring_destroy(&aclenv->ring);
1206 }
1207
1208
1209
1210 /* This is the function which is used to call async functions handled by
1211 * async_thread() and our pool of slave processes. It does not check for
1212 * timeouts; the async functions should therefore be sure to always return
1213 * within a reasonable delay if they could otherwise block indefinitely.
1214 */
1215 void
1216 async_call(struct async_clenv *aclenv, unsigned int function)
1217 {
1218 /* Send request */
1219 aclenv->msg->func_id = function;
1220 aclenv->msg->aclenv = aclenv;
1221 pthread_msg_put(&async->port, &(aclenv->msg->msg));
1222
1223 /* Sleep until we get a reply back, in a thread-safe manner.
1224 * Note: This will permanently block the current thread if the
1225 * task does not complete. It would be possible to respect a timeout
1226 * here, however this could cause problems. For instance, we may have
1227 * sent a resolve request which takes some time because of a network
1228 * problem, and the client could disconnect (and this thread end).
1229 * The async function reply would then attempt to use a no longer
1230 * existing port. (It could look for the port name in the list, but
1231 * this would have considerable performance drawbacks. Ideally,
1232 * any async operation would be abortable by the thread that initiated
1233 * it. This could be tricky to implement in order to work for everything
1234 * properly, avoiding resource leaks). So blocking the thread until
1235 * we get a reply back seems a good choice here; the async functions
1236 * should ensure that they eventually return.
1237 */
1238 while (pthread_msg_get(&aclenv->port) == NULL)
1239 pthread_ring_wait(&aclenv->ring, NULL);
1240 }
1241
1242
1243 /* This function is called by async_init() and starts up all slave
1244 * asynchronous processes. It knows to stop when the block list has no
1245 * more entries. It is expected that make_daemon() was used first, so that
1246 * file descriptors 0-2 are already redirected to /dev/null.
1247 */
1248 static bool
1249 spawn_async_procs(uid_t uid, gid_t *gids, int ngids, size_t msg_len)
1250 {
1251 struct async_proc *aproc;
1252 int s[2], opt;
1253 bool ret = false;
1254
1255 if (async == NULL)
1256 return ret;
1257
1258 do {
1259 if ((aproc = (struct async_proc *)pool_alloc(&async->freeprocs_pool,
1260 true))) {
1261 if ((socketpair(PF_UNIX, SOCK_DGRAM, 0, s)) == 0) {
1262
1263 /* Set buffer sizes to make sure that atomic datagram
1264 * operations work as expected for the maximum packet size
1265 */
1266 opt = (int)BALIGN_CEIL(msg_len, 1024);
1267 if (setsockopt(s[0], SOL_SOCKET, SO_SNDBUF, &opt,
1268 sizeof(int)) == -1 ||
1269 setsockopt(s[0], SOL_SOCKET, SO_RCVBUF, &opt,
1270 sizeof(int)) == -1 ||
1271 setsockopt(s[1], SOL_SOCKET, SO_SNDBUF, &opt,
1272 sizeof(int)) == -1 ||
1273 setsockopt(s[1], SOL_SOCKET, SO_RCVBUF, &opt,
1274 sizeof(int)) == -1)
1275 DEBUG_PRINTF("setsockopt(SO_*BUF)");
1276
1277 aproc->sock = s[0];
1278 /* Create new process */
1279 if (!(aproc->pid = fork())) {
1280 close(s[0]);
1281
1282 /* Child */
1283 chdir("/");
1284 umask(0);
1285
1286 /* Finally drop privileges and perform our tasks */
1287 if (mmdropprivs(uid, gids, ngids)) {
1288 close(s[0]);
1289 while ((aproc = DLIST_TOP(&async->freeprocs))
1290 != NULL) {
1291 close(aproc->sock);
1292 DLIST_UNLINK(&async->freeprocs, (node_t *)aproc);
1293 }
1294 async_proc_main(s[1]);
1295 } else
1296 syslog(LOG_NOTICE,
1297 "spawn_async_procs() - mmdropprivs()");
1298
1299 } else if (aproc->pid == -1) {
1300 pool_free((pnode_t *)aproc);
1301 aproc = NULL;
1302 syslog(LOG_NOTICE, "spawn_async_procs() - fork()");
1303 close(s[0]);
1304 close(s[1]);
1305 }
1306 } else
1307 syslog(LOG_NOTICE, "spawn_async_procs() - socketpair()");
1308 }
1309 if (aproc) {
1310 close(s[1]);
1311 DLIST_APPEND(&async->freeprocs, (node_t *)aproc);
1312 }
1313 else ret = true;
1314 } while (aproc);
1315
1316 return (ret);
1317 }
1318
1319
1320 /* The main loop of each asynchronous process in the pool */
1321 static void
1322 async_proc_main(int sock)
1323 {
1324 struct pollfd pfd[] = {
1325 {-1, POLLIN | POLLERR, 0}
1326 };
1327
1328 #ifndef __GLIBC__
1329 setproctitle("Asynchroneous server process");
1330 #endif
1331
1332 pfd[0].fd = sock;
1333
1334 (void) server_init_afi();
1335 for (;;) {
1336 int selr;
1337 unsigned int func_id;
1338 size_t len;
1339
1340 /* Wait for packets, process them and send result packets */
1341 if ((selr = poll(pfd, 1, -1))) {
1342 if (selr == 1) {
1343 if (pfd[0].revents & POLLERR) {
1344 /* This should not happen. If it does, something really
1345 * went wrong; exit.
1346 */
1347 syslog(LOG_NOTICE,
1348 "async_proc_main() - Socket error (%d)", sock);
1349 kill(0, SIGTERM);
1350 for (;;)
1351 sleep(30);
1352 }
1353 if (pfd[0].revents & POLLIN) {
1354 if ((len = recv(sock, async->msg, async->msg_len,
1355 MSG_WAITALL))
1356 >= sizeof(struct async_msg)) {
1357 func_id = async->msg->func_id;
1358 if (func_id < async->nfuncs &&
1359 len == async->funcs[func_id].msg_len) {
1360 async->funcs[func_id].func(async->msg);
1361 if ((send(sock, async->msg, len, 0)) != len)
1362 syslog(LOG_NOTICE,
1363 "async_proc_main() - send(%d:%d)",
1364 sock, (int)len);
1365 } else
1366 syslog(LOG_NOTICE, "async_proc_main() - "
1367 "Invalid function %u on socket %d",
1368 func_id, sock);
1369 } else
1370 syslog(LOG_NOTICE, "async_proc_main() - recv(%d:%d)",
1371 sock, (int)async->msg_len);
1372 }
1373 }
1374 }
1375 }
1376 }
1377
1378
1379 /* This function will always be available in the asynchronous functions set,
1380 * referenced as ASYNC_RESOLVEHOSTNAME when using async_call().
1381 */
1382 static void
1383 async_resolvehostname(struct async_msg *msg)
1384 {
1385 struct async_resolvehostname_msg *amsg = (void *)msg;
1386 struct server_sockaddr *saddr = &(amsg->un.args.saddr);
1387
1388 if ((getnameinfo(SERVER_SOCKADDR(saddr), SERVER_SOCKADDR_SOCKLEN(saddr),
1389 amsg->un.res.hostname, 255, NULL, 0, 0)) != 0) {
1390 DEBUG_PRINTF("getnameinfo()");
1391 strcpy(amsg->un.res.hostname, "unknown");
1392 }
1393 }
1394
1395
1396 /* Easy function wrapper for async_resolvehostname(). mmstrfree() should
1397 * be called to release the supplied result string when no longer needed.
1398 * This function is thread-safe as well as asynchronous: the current
1399 * process (all threads) remains free to execute other tasks while
1400 * the current thread waits for the hostname resolution to complete, as the
1401 * request is dispatched to one of the asynchronous processes in our pool.
1402 */
1403 char *
1404 resolve_hostname(struct async_clenv *aclenv, struct server_sockaddr *saddr)
1405 {
1406 char *tmp;
1407 struct async_resolvehostname_msg *amsg = (void *)aclenv->msg;
1408
1409 if ((tmp = mmstralloc(255)) != NULL) {
1410 memcpy(&(amsg->un.args.saddr), saddr,
1411 sizeof(struct server_sockaddr));
1412 async_call(aclenv, ASYNC_RESOLVEHOSTNAME);
1413 strncpy(tmp, amsg->un.res.hostname, 255);
1414 } else
1415 syslog(LOG_NOTICE, "resolve_hostname() - mmstralloc()");
1416
1417 return (tmp);
1418 }
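/*
 * Illustrative usage sketch (not part of the original source), e.g. from
 * within a client handler that owns an async_clenv; the function name is
 * hypothetical.  The returned mmstr string must be released with
 * mmstrfree() when no longer needed.
 */
#if 0
static void
example_log_peer(struct async_clenv *aclenv, clientlistnode *clnode)
{
	char *host;

	if ((host = resolve_hostname(aclenv, &clnode->client)) != NULL) {
		syslog(LOG_INFO, "client resolves to [%s]", host);
		host = mmstrfree(host);
	}
}
#endif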
1419
1420
1421 /* These are related to the clnode IP address hash table. */
1422
1423 /* Used to efficiently hash the supplied server_sockaddr keys to 32 bits */
1424 /* ARGSUSED */
1425 static uint32_t
1426 server_sockaddr_hash(const void *d, size_t len)
1427 {
1428 const struct server_sockaddr *sa = d;
1429 uint32_t hash = 0, *words;
1430
1431 switch (sa->ss_family) {
1432 case AF_INET:
1433 /* Very fast, already stored internally as a 32-bit value */
1434 hash = (uint32_t)sa->u.sockaddr_in.sin_addr.s_addr;
1435 break;
1436 case AF_INET6:
1437 /* Requires hashing 4 32-bit words, rather fast */
1438 words = (uint32_t *)&sa->u.sockaddr_in6.sin6_addr;
1439 /* hash = words[0] ^ words[1] ^ words[2] ^ words[3]; */
1440 hash = words[0] + (67306411U * hash);
1441 hash = words[1] + (67306411U * hash);
1442 hash = words[2] + (67306411U * hash);
1443 hash = words[3] + (67306411U * hash);
1444 break;
1445 default:
1446 DEBUG_PRINTF("Invalid family %d!", sa->ss_family);
1447 }
1448
1449 return hash;
1450 }
1451
1452 /* And to efficiently compare the supplied server_sockaddr keys */
1453 /* ARGSUSED */
1454 static int
1455 server_sockaddr_cmp(const void *s, const void *d, size_t len)
1456 {
1457 const struct server_sockaddr *sa = s, *da = d;
1458 uint32_t *sawords, *dawords;
1459
1460 if (sa->ss_family != da->ss_family)
1461 return -1;
1462
1463 switch(sa->ss_family) {
1464 case AF_INET:
1465 /* Single 32-bit word */
1466 if (sa->u.sockaddr_in.sin_addr.s_addr !=
1467 da->u.sockaddr_in.sin_addr.s_addr)
1468 return -1;
1469 break;
1470 case AF_INET6:
1471 /* 4 32-bit words */
1472 sawords = (uint32_t *)&sa->u.sockaddr_in6.sin6_addr;
1473 dawords = (uint32_t *)&da->u.sockaddr_in6.sin6_addr;
1474 if (sawords[0] != dawords[0] || sawords[1] != dawords[1] ||
1475 sawords[2] != dawords[2] || sawords[3] != dawords[3])
1476 return -1;
1477 break;
1478 default:
1479 DEBUG_PRINTF("Invalid family %d!", sa->ss_family);
1480 return -1;
1481 }
1482
1483 return 0;
1484 }
1485
1486
1487 /* This is the actual ctable hash control and expiration thread */
1488 /* ARGSUSED */
1489 static void *
1490 clnode_expire_thread(void *args)
1491 {
1492 long period = *((long *)args);
1493 struct clnode_expire_thread_iterator_udata data;
1494
1495 /* Set initial timeout to maximum allowed */
1496 data.soonest = period;
1497 data.cnt = 0;
1498 for (;;) {
1499 /* Sleep until it is known that at least one node expired */
1500 pthread_sleep((unsigned int)data.soonest);
1501 /* Tell our iterator function the current time and the maximum
1502 * time it is allowed to wait.
1503 */
1504 data.current = time(NULL);
1505 data.soonest = period;
1506 /* Lock ctable, reset expired nodes, garbage collect, and set
1507 * data.soonest to the time of the soonest-expiring node.
1508 */
1509 pthread_mutex_lock(&ctable_lock);
1510 if (HASHTABLE_NODES(&ctable) > 0)
1511 hashtable_iterate(&ctable, clnode_expire_thread_iterator, &data);
1512 pthread_mutex_unlock(&ctable_lock);
1513 }
1514
1515 /* NOTREACHED */
1516 pthread_exit(NULL);
1517 return NULL;
1518 }
1519
1520
1521 static bool
1522 clnode_expire_thread_iterator(hashnode_t *hnod, void *udata)
1523 {
1524 clientlistnode *clnode = (clientlistnode *)hnod;
1525 struct clnode_expire_thread_iterator_udata *data = udata;
1526 time_t rem;
1527
1528 /* If the node expired, reset it. For nodes which did not, record the
1529 * soonest-to-expire node. Nodes which expired and for which no
1530 * connections exist anymore are expunged.
1531 */
1532 if ((rem = LR_REMAINS(&clnode->lr, data->current)) == 0) {
1533 /* This entry expired */
1534 if (clnode->connections == 0) {
1535 /* Safe to expunge this node from the cache */
1536 hashtable_unlink(&ctable, (hashnode_t *)clnode);
1537 pool_free((pnode_t *)clnode);
1538 } else {
1539 /* Reset it */
1540 LR_EXPIRE(&clnode->lr, data->current);
1541 rem = LR_REMAINS(&clnode->lr, data->current);
1542 }
1543 }
1544 if (rem != 0 && data->soonest > rem)
1545 data->soonest = rem;
1546
1547 return true;
1548 }