daemon_start.c (115829B)
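/*
 * Illustrative usage sketch (hypothetical, not part of the MHD sources):
 * MHD_daemon_start(), implemented below, consumes daemon options such as
 * MHD_D_O_BIND_PORT, MHD_D_O_BIND_SA or MHD_D_O_LISTEN_SOCKET together with
 * the selected work mode and sockets-polling syscall.  Roughly, an
 * application would:
 *
 *   d = <create the daemon object>;
 *   <set MHD_D_O_BIND_PORT (e.g. MHD_AF_DUAL, port 8080) and a work mode
 *    such as MHD_WM_WORKER_THREADS via the option-setting API>;
 *   if (MHD_SC_OK != MHD_daemon_start (d))
 *     <destroy the daemon object>;
 *
 * The creation and option-setting calls are deliberately left as
 * placeholders; only the identifiers named above appear in this file.
 */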
1 /* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */ 2 /* 3 This file is part of GNU libmicrohttpd. 4 Copyright (C) 2024 Evgeny Grin (Karlson2k) 5 6 GNU libmicrohttpd is free software; you can redistribute it and/or 7 modify it under the terms of the GNU Lesser General Public 8 License as published by the Free Software Foundation; either 9 version 2.1 of the License, or (at your option) any later version. 10 11 GNU libmicrohttpd is distributed in the hope that it will be useful, 12 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 Lesser General Public License for more details. 15 16 Alternatively, you can redistribute GNU libmicrohttpd and/or 17 modify it under the terms of the GNU General Public License as 18 published by the Free Software Foundation; either version 2 of 19 the License, or (at your option) any later version, together 20 with the eCos exception, as follows: 21 22 As a special exception, if other files instantiate templates or 23 use macros or inline functions from this file, or you compile this 24 file and link it with other works to produce a work based on this 25 file, this file does not by itself cause the resulting work to be 26 covered by the GNU General Public License. However the source code 27 for this file must still be made available in accordance with 28 section (3) of the GNU General Public License v2. 29 30 This exception does not invalidate any other reasons why a work 31 based on this file might be covered by the GNU General Public 32 License. 33 34 You should have received copies of the GNU Lesser General Public 35 License and the GNU General Public License along with this library; 36 if not, see <https://www.gnu.org/licenses/>. 
 */

/**
 * @file src/mhd2/daemon_start.c
 * @brief The implementation of the MHD_daemon_start()
 * @author Karlson2k (Evgeny Grin)
 */

#include "mhd_sys_options.h"

#include "mhd_assert.h"
#include "mhd_unreachable.h"

#include "sys_bool_type.h"
#include "sys_base_types.h"
#include "sys_malloc.h"
#include "compat_calloc.h"

#include <string.h>
#include "sys_sockets_types.h"
#include "sys_sockets_headers.h"
#include "mhd_sockets_macros.h"
#include "sys_ip_headers.h"

#ifdef MHD_SOCKETS_KIND_POSIX
#  include "sys_errno.h"
#endif
#ifdef MHD_SUPPORT_EPOLL
#  include <sys/epoll.h>
#endif

#ifdef MHD_SOCKETS_KIND_POSIX
#  include <fcntl.h>
#  ifdef MHD_SUPPORT_SELECT
#    ifdef HAVE_SYS_SELECT_H
#      include <sys/select.h> /* For FD_SETSIZE */
#    else
#      ifdef HAVE_SYS_TIME_H
#        include <sys/time.h>
#      endif
#      ifdef HAVE_SYS_TYPES_H
#        include <sys/types.h>
#      endif
#      ifdef HAVE_UNISTD_H
#        include <unistd.h>
#      endif
#    endif
#  endif
#endif

#include "extr_events_funcs.h"

#include "mhd_dbg_print.h"

#include "mhd_limits.h"

#include "mhd_daemon.h"
#include "daemon_options.h"

#include "mhd_sockets_funcs.h"

#include "mhd_lib_init.h"
#include "daemon_logger.h"

#ifdef MHD_SUPPORT_HTTPS
#  include "mhd_tls_common.h"
#  include "mhd_tls_funcs.h"
#endif

#include "events_process.h"

#ifdef MHD_SUPPORT_THREADS
#  include "mhd_itc.h"
#  include "mhd_threads.h"
#  include "daemon_funcs.h"
#endif

#include "mhd_public_api.h"


/**
 * The default value for fastopen queue length (currently GNU/Linux only)
 */
#define MHD_TCP_FASTOPEN_DEF_QUEUE_LEN 64

/**
 * Release any internally allocated pointers, then deallocate the settings.
 * @param s the pointer to the settings to release
 */
static void
dsettings_release (struct DaemonOptions *s)
{
  /* Release starting from the last member */
  if (NULL != s->random_entropy.v_buf)
    free (s->random_entropy.v_buf);
  if (MHD_INVALID_SOCKET != s->listen_socket)
    mhd_socket_close (s->listen_socket);
  if (NULL != s->bind_sa.v_sa)
    free (s->bind_sa.v_sa);
  if (NULL != s->tls_cert_key.v_mem_cert)
    free (s->tls_cert_key.v_mem_cert);
  free (s);
}


/**
 * Set basic daemon parameters that do not require additional initialisation.
 * Mostly copy such parameters from the settings object to the daemon object.
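 * (Note: the default connection timeout from the settings is converted to
 * milliseconds here and clamped if the multiplication would overflow; see
 * the function body below.)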
145 * @param d the daemon object 146 * @param s the user settings 147 * @return MHD_SC_OK on success, 148 * the error code otherwise 149 */ 150 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) 151 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 152 daemon_set_basic_settings (struct MHD_Daemon *restrict d, 153 struct DaemonOptions *restrict s) 154 { 155 static const uint_fast64_t max_timeout_ms_value = 156 ((uint_fast64_t) ~((uint_fast64_t) 0)) / 8; 157 158 #ifdef MHD_SUPPORT_HTTP2 159 // TODO: make it configurable 160 d->http_cfg.http1x = true; 161 d->http_cfg.http2 = true; 162 #endif /* MHD_SUPPORT_HTTP2 */ 163 164 d->req_cfg.strictness = s->protocol_strict_level.v_sl; 165 166 #ifdef MHD_SUPPORT_COOKIES 167 d->req_cfg.disable_cookies = (MHD_NO != s->disable_cookies); 168 #endif 169 170 d->req_cfg.suppress_date = (MHD_NO != s->suppress_date_header); 171 172 d->conns.cfg.timeout_ms = ((uint_fast64_t) s->default_timeout) * 1000u; 173 if (d->conns.cfg.timeout_ms / 1000u != s->default_timeout) 174 d->conns.cfg.timeout_ms = max_timeout_ms_value; 175 else if (max_timeout_ms_value < d->conns.cfg.timeout_ms) 176 d->conns.cfg.timeout_ms = max_timeout_ms_value; 177 178 d->conns.cfg.per_ip_limit = s->per_ip_limit; 179 180 return MHD_SC_OK; 181 } 182 183 184 /** 185 * Set the daemon work mode. 186 * This function also checks whether requested work mode is supported by 187 * current build and whether work mode is compatible with requested events 188 * polling technique. 189 * @param d the daemon object 190 * @param s the user settings 191 * @return MHD_SC_OK on success, 192 * the error code otherwise 193 */ 194 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) 195 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 196 daemon_set_work_mode (struct MHD_Daemon *restrict d, 197 struct DaemonOptions *restrict s) 198 { 199 switch (s->work_mode.mode) 200 { 201 case MHD_WM_EXTERNAL_PERIODIC: 202 d->wmode_int = mhd_WM_INT_INTERNAL_EVENTS_NO_THREADS; 203 break; 204 case MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL: 205 case MHD_WM_EXTERNAL_EVENT_LOOP_CB_EDGE: 206 if (MHD_SPS_AUTO != s->poll_syscall) 207 { 208 mhd_LOG_MSG ( \ 209 d, MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID, \ 210 "The requested work mode is not compatible with setting " \ 211 "socket polling syscall."); 212 return MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID; 213 } 214 if (MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL == s->work_mode.mode) 215 d->wmode_int = mhd_WM_INT_EXTERNAL_EVENTS_LEVEL; 216 else 217 d->wmode_int = mhd_WM_INT_EXTERNAL_EVENTS_EDGE; 218 break; 219 case MHD_WM_EXTERNAL_SINGLE_FD_WATCH: 220 if ((MHD_SPS_AUTO != s->poll_syscall) && 221 (MHD_SPS_EPOLL != s->poll_syscall)) 222 { 223 mhd_LOG_MSG ( \ 224 d, MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID, \ 225 "The requested work mode MHD_WM_EXTERNAL_SINGLE_FD_WATCH " \ 226 "is not compatible with requested socket polling syscall."); 227 return MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID; 228 } 229 #ifndef MHD_SUPPORT_EPOLL 230 mhd_LOG_MSG ( \ 231 d, MHD_SC_FEATURE_DISABLED, \ 232 "The epoll is required for the requested work mode " \ 233 "MHD_WM_EXTERNAL_SINGLE_FD_WATCH, but not available on this " \ 234 "platform or MHD build."); 235 return MHD_SC_FEATURE_DISABLED; 236 #else 237 d->wmode_int = mhd_WM_INT_INTERNAL_EVENTS_NO_THREADS; 238 #endif 239 break; 240 case MHD_WM_THREAD_PER_CONNECTION: 241 if (MHD_SPS_EPOLL == s->poll_syscall) 242 { 243 mhd_LOG_MSG ( \ 244 d, MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID, \ 245 "The requested work mode MHD_WM_THREAD_PER_CONNECTION " \ 246 "is not 
compatible with 'epoll' sockets polling."); 247 return MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID; 248 } 249 mhd_FALLTHROUGH; 250 /* Intentional fallthrough */ 251 case MHD_WM_WORKER_THREADS: 252 #ifndef MHD_SUPPORT_THREADS 253 mhd_LOG_MSG (d, MHD_SC_FEATURE_DISABLED, \ 254 "The internal threads modes are not supported by this " \ 255 "build of MHD."); 256 return MHD_SC_FEATURE_DISABLED; 257 #else /* MHD_SUPPORT_THREADS */ 258 if (MHD_WM_THREAD_PER_CONNECTION == s->work_mode.mode) 259 d->wmode_int = mhd_WM_INT_INTERNAL_EVENTS_THREAD_PER_CONNECTION; 260 else if (1 >= s->work_mode.params.num_worker_threads) /* && (MHD_WM_WORKER_THREADS == s->work_mode.mode) */ 261 d->wmode_int = mhd_WM_INT_INTERNAL_EVENTS_ONE_THREAD; 262 else 263 d->wmode_int = mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL; 264 #endif /* MHD_SUPPORT_THREADS */ 265 break; 266 default: 267 mhd_LOG_MSG (d, MHD_SC_CONFIGURATION_UNEXPECTED_WM, \ 268 "Wrong requested work mode."); 269 return MHD_SC_CONFIGURATION_UNEXPECTED_WM; 270 } 271 272 if ((mhd_WM_INT_EXTERNAL_EVENTS_LEVEL != d->wmode_int) && 273 (mhd_WM_INT_EXTERNAL_EVENTS_EDGE != d->wmode_int) && 274 (MHD_NO != s->reregister_all)) 275 { 276 mhd_LOG_MSG ( \ 277 d, \ 278 MHD_SC_EXTERNAL_EVENT_ONLY, \ 279 "The MHD_D_O_REREGISTER_ALL option can be used only with external " \ 280 "events work modes."); 281 return MHD_SC_EXTERNAL_EVENT_ONLY; 282 } 283 284 return MHD_SC_OK; 285 } 286 287 288 union mhd_SockaddrAny 289 { 290 struct sockaddr sa; 291 struct sockaddr_in sa_i4; 292 #ifdef HAVE_INET6 293 struct sockaddr_in6 sa_i6; 294 #endif /* HAVE_INET6 */ 295 struct sockaddr_storage sa_stor; 296 }; 297 298 299 /** 300 * The type of the socket to create 301 */ 302 enum mhd_CreateSktType 303 { 304 /** 305 * Unknown address family (could be IP or not IP) 306 */ 307 mhd_SKT_UNKNOWN = -4 308 , 309 /** 310 * The socket is not IP. 311 */ 312 mhd_SKT_NON_IP = -2 313 , 314 /** 315 * The socket is UNIX. 316 */ 317 mhd_SKT_UNIX = -1 318 , 319 /** 320 * No socket 321 */ 322 mhd_SKT_NO_SOCKET = MHD_AF_NONE 323 , 324 /** 325 * IPv4 only 326 */ 327 mhd_SKT_IP_V4_ONLY = MHD_AF_INET4 328 , 329 /** 330 * IPv6 only 331 */ 332 mhd_SKT_IP_V6_ONLY = MHD_AF_INET6 333 , 334 /** 335 * IPv6 with dual stack enabled 336 */ 337 mhd_SKT_IP_DUAL_REQUIRED = MHD_AF_DUAL 338 , 339 /** 340 * Try IPv6 with dual stack then IPv4 341 */ 342 mhd_SKT_IP_V4_WITH_V6_OPT = MHD_AF_DUAL_v6_OPTIONAL 343 , 344 /** 345 * IPv6 with optional dual stack 346 */ 347 mhd_SKT_IP_V6_WITH_V4_OPT = MHD_AF_DUAL_v4_OPTIONAL 348 , 349 /** 350 * Try IPv4 then IPv6 with optional dual stack 351 */ 352 mhd_SKT_IP_V4_WITH_FALLBACK = 16 353 }; 354 355 /** 356 * Create socket, bind to the address and start listening on the socket. 357 * 358 * The socket is assigned to the daemon as listening FD. 
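 *
 * For the automatic address-family modes this function may call itself again
 * with adjusted 'v6_tried' / 'force_v6_any_dual' arguments, falling back
 * between IPv6 (with optional dual stack) and IPv4 when socket creation,
 * binding or listening fails.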
 * @param d the daemon to use
 * @param s the user settings
 * @param v6_tried true if IPv6 has been tried already
 * @param force_v6_any_dual true if IPv6 is forced with dual stack either
 *                          enabled or not
 * @param prev_bnd_lstn_err if this function has already been tried with
 *                          another address family and failed to bind or to
 *                          start listening, then this parameter must be set
 *                          to the corresponding status code, otherwise this
 *                          parameter must be #MHD_SC_OK
 * @return #MHD_SC_OK on success,
 *         the error code otherwise (no error printed to log if result is
 *         #MHD_SC_LISTEN_SOCKET_BIND_FAILED or #MHD_SC_LISTEN_FAILURE)
 */
static enum MHD_StatusCode
create_bind_listen_stream_socket_inner (struct MHD_Daemon *restrict d,
                                        struct DaemonOptions *restrict s,
                                        bool v6_tried,
                                        bool force_v6_any_dual,
                                        enum MHD_StatusCode prev_bnd_lstn_err)
{
  MHD_Socket sk;
  enum mhd_CreateSktType sk_type;
  bool sk_already_listening;
  union mhd_SockaddrAny sa_all;
  const struct sockaddr *p_use_sa;
  socklen_t use_sa_size;
  uint_least16_t sk_port;
  bool is_non_block;
  bool is_non_inhr;
  enum MHD_StatusCode ret;

  sk = MHD_INVALID_SOCKET;
  sk_type = mhd_SKT_NO_SOCKET;
  sk_already_listening = false;
  p_use_sa = NULL;
  use_sa_size = 0;
  sk_port = 0;

#ifndef HAVE_INET6
  mhd_assert (! v6_tried);
  mhd_assert (! force_v6_any_dual);
#endif
  mhd_assert (mhd_SKT_NO_SOCKET == sk_type); /* Mute analyser warning */

  if (MHD_INVALID_SOCKET != s->listen_socket)
  {
    mhd_assert (! v6_tried);
    mhd_assert (! force_v6_any_dual);
    /* Check for options conflicts */
    if (0 != s->bind_sa.v_sa_len)
    {
      mhd_LOG_MSG (d, MHD_SC_OPTIONS_CONFLICT, \
                   "MHD_D_O_BIND_SA cannot be used together " \
                   "with MHD_D_O_LISTEN_SOCKET");
      return MHD_SC_OPTIONS_CONFLICT;
    }
    else if (MHD_AF_NONE != s->bind_port.v_af)
    {
      mhd_LOG_MSG (d, MHD_SC_OPTIONS_CONFLICT, \
                   "MHD_D_O_BIND_PORT cannot be used together " \
                   "with MHD_D_O_LISTEN_SOCKET");
      return MHD_SC_OPTIONS_CONFLICT;
    }

    /* No options conflicts */
    sk = s->listen_socket;
    s->listen_socket = MHD_INVALID_SOCKET; /* Prevent closing with settings cleanup */
    sk_type = mhd_SKT_UNKNOWN;
    sk_already_listening = true;
  }
  else if ((0 != s->bind_sa.v_sa_len) || (MHD_AF_NONE != s->bind_port.v_af))
  {
    if (0 != s->bind_sa.v_sa_len)
    {
      mhd_assert (! v6_tried);
      mhd_assert (! 
force_v6_any_dual); 435 436 /* Check for options conflicts */ 437 if (MHD_AF_NONE != s->bind_port.v_af) 438 { 439 mhd_LOG_MSG (d, MHD_SC_OPTIONS_CONFLICT, \ 440 "MHD_D_O_BIND_SA cannot be used together " \ 441 "with MHD_D_O_BIND_PORT"); 442 return MHD_SC_OPTIONS_CONFLICT; 443 } 444 445 /* No options conflicts */ 446 switch (s->bind_sa.v_sa->sa_family) 447 { 448 case AF_INET: 449 sk_type = mhd_SKT_IP_V4_ONLY; 450 if (sizeof(sa_all.sa_i4) > s->bind_sa.v_sa_len) 451 { 452 mhd_LOG_MSG (d, MHD_SC_CONFIGURATION_WRONG_SA_SIZE, \ 453 "The size of the provided sockaddr does not match " 454 "used address family"); 455 return MHD_SC_CONFIGURATION_WRONG_SA_SIZE; 456 } 457 memcpy (&(sa_all.sa_i4), s->bind_sa.v_sa, sizeof(sa_all.sa_i4)); 458 sk_port = (uint_least16_t) ntohs (sa_all.sa_i4.sin_port); 459 #ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN 460 mhd_assert (sizeof(sa_all.sa_i4) == (uint8_t) sizeof(sa_all.sa_i4)); 461 sa_all.sa_i4.sin_len = (uint8_t) sizeof(sa_all.sa_i4); 462 #endif 463 p_use_sa = (struct sockaddr *) &(sa_all.sa_i4); 464 use_sa_size = (socklen_t) sizeof(sa_all.sa_i4); 465 break; 466 #ifdef HAVE_INET6 467 case AF_INET6: 468 sk_type = mhd_SKT_IP_V6_ONLY; 469 if (sizeof(sa_all.sa_i6) > s->bind_sa.v_sa_len) 470 { 471 mhd_LOG_MSG (d, MHD_SC_CONFIGURATION_WRONG_SA_SIZE, \ 472 "The size of the provided sockaddr does not match " 473 "used address family"); 474 return MHD_SC_CONFIGURATION_WRONG_SA_SIZE; 475 } 476 memcpy (&(sa_all.sa_i6), s->bind_sa.v_sa, s->bind_sa.v_sa_len); 477 sk_port = (uint_least16_t) ntohs (sa_all.sa_i6.sin6_port); 478 #ifdef HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN 479 mhd_assert (sizeof(sa_all.sa_i6) == (uint8_t) sizeof(sa_all.sa_i6)); 480 sa_all.sa_i6.sin6_len = (uint8_t) sizeof(sa_all.sa_i6); 481 #endif 482 p_use_sa = (struct sockaddr *) &(sa_all.sa_i6); 483 use_sa_size = (socklen_t) sizeof(sa_all.sa_i6); 484 break; 485 #endif /* HAVE_INET6 */ 486 #ifdef MHD_AF_UNIX 487 case MHD_AF_UNIX: 488 sk_type = mhd_SKT_UNIX; 489 p_use_sa = NULL; /* To be set below */ 490 break; 491 #endif /* MHD_AF_UNIX */ 492 default: 493 sk_type = mhd_SKT_UNKNOWN; 494 p_use_sa = NULL; /* To be set below */ 495 break; 496 } 497 498 if (s->bind_sa.v_dual) 499 { 500 if (mhd_SKT_IP_V6_ONLY != sk_type) 501 { 502 mhd_LOG_MSG (d, MHD_SC_LISTEN_DUAL_STACK_NOT_SUITABLE, \ 503 "IP dual stack is not possible for provided sockaddr"); 504 } 505 #ifdef HAVE_INET6 506 else 507 { 508 #ifdef HAVE_DCLR_IPV6_V6ONLY 509 sk_type = mhd_SKT_IP_DUAL_REQUIRED; 510 #else /* ! IPV6_V6ONLY */ 511 mhd_LOG_MSG (d, \ 512 MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_NOT_SUPPORTED, \ 513 "IP dual stack is not supported by this platform or " \ 514 "by this MHD build"); 515 #endif /* ! 
IPV6_V6ONLY */ 516 } 517 #endif /* HAVE_INET6 */ 518 } 519 520 if (NULL == p_use_sa) 521 { 522 #if defined(HAVE_STRUCT_SOCKADDR_SA_LEN) && \ 523 defined(HAVE_STRUCT_SOCKADDR_STORAGE_SS_LEN) 524 if ((((size_t) s->bind_sa.v_sa->sa_len) != s->bind_sa.v_sa_len) && 525 (sizeof(sa_all) >= s->bind_sa.v_sa_len)) 526 { 527 /* Fix embedded 'sa_len' member if possible */ 528 memcpy (&sa_all, s->bind_sa.v_sa, s->bind_sa.v_sa_len); 529 mhd_assert (s->bind_sa.v_sa_len == (uint8_t) s->bind_sa.v_sa_len); 530 sa_all.sa_stor.ss_len = (uint8_t) s->bind_sa.v_sa_len; 531 p_use_sa = (const struct sockaddr *) &(sa_all.sa_stor); 532 } 533 else 534 #endif /* HAVE_STRUCT_SOCKADDR_SA_LEN && HAVE_STRUCT_SOCKADDR_STORAGE_SS_LEN */ 535 p_use_sa = s->bind_sa.v_sa; 536 use_sa_size = (socklen_t) s->bind_sa.v_sa_len; 537 } 538 } 539 else /* if (MHD_AF_NONE != s->bind_port.v_af) */ 540 { 541 /* No options conflicts */ 542 switch (s->bind_port.v_af) 543 { 544 case MHD_AF_NONE: 545 mhd_assert (0); 546 mhd_UNREACHABLE (); 547 return MHD_SC_INTERNAL_ERROR; 548 case MHD_AF_AUTO: 549 #ifdef HAVE_INET6 550 #ifdef HAVE_DCLR_IPV6_V6ONLY 551 if (force_v6_any_dual) 552 sk_type = mhd_SKT_IP_V6_WITH_V4_OPT; 553 else if (v6_tried) 554 sk_type = mhd_SKT_IP_V4_WITH_FALLBACK; 555 else 556 sk_type = mhd_SKT_IP_V4_WITH_V6_OPT; 557 #else /* ! IPV6_V6ONLY */ 558 mhd_assert (! v6_tried); 559 if (force_v6_any_dual) 560 sk_type = mhd_SKT_IP_V6_ONLY; 561 else 562 sk_type = mhd_SKT_IP_V4_WITH_FALLBACK; 563 #endif /* ! IPV6_V6ONLY */ 564 #else /* ! HAVE_INET6 */ 565 sk_type = mhd_SKT_IP_V4_ONLY; 566 #endif /* ! HAVE_INET6 */ 567 break; 568 case MHD_AF_INET4: 569 mhd_assert (! v6_tried); 570 mhd_assert (! force_v6_any_dual); 571 sk_type = mhd_SKT_IP_V4_ONLY; 572 break; 573 case MHD_AF_INET6: 574 mhd_assert (! v6_tried); 575 mhd_assert (! force_v6_any_dual); 576 #ifdef HAVE_INET6 577 sk_type = mhd_SKT_IP_V6_ONLY; 578 #else /* ! HAVE_INET6 */ 579 mhd_LOG_MSG (d, MHD_SC_IPV6_NOT_SUPPORTED_BY_BUILD, \ 580 "IPv6 is not supported by this MHD build or " \ 581 "by this platform"); 582 return MHD_SC_IPV6_NOT_SUPPORTED_BY_BUILD; 583 #endif /* ! HAVE_INET6 */ 584 break; 585 case MHD_AF_DUAL: 586 mhd_assert (! v6_tried); 587 mhd_assert (! force_v6_any_dual); 588 #ifdef HAVE_INET6 589 #ifdef HAVE_DCLR_IPV6_V6ONLY 590 sk_type = mhd_SKT_IP_DUAL_REQUIRED; 591 #else /* ! IPV6_V6ONLY */ 592 mhd_LOG_MSG (d, 593 MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_NOT_SUPPORTED, \ 594 "IP dual stack is not supported by this platform or " \ 595 "by this MHD build"); 596 sk_type = mhd_SKT_IP_V6_ONLY; 597 #endif /* ! IPV6_V6ONLY */ 598 #else /* ! HAVE_INET6 */ 599 mhd_LOG_MSG (d, MHD_SC_IPV6_NOT_SUPPORTED_BY_BUILD, \ 600 "IPv6 is not supported by this MHD build or " \ 601 "by this platform"); 602 return MHD_SC_IPV6_NOT_SUPPORTED_BY_BUILD; 603 #endif /* ! HAVE_INET6 */ 604 break; 605 case MHD_AF_DUAL_v4_OPTIONAL: 606 mhd_assert (! v6_tried); 607 mhd_assert (! force_v6_any_dual); 608 #ifdef HAVE_INET6 609 #ifdef HAVE_DCLR_IPV6_V6ONLY 610 sk_type = mhd_SKT_IP_V6_WITH_V4_OPT; 611 #else /* ! IPV6_V6ONLY */ 612 sk_type = mhd_SKT_IP_V6_ONLY; 613 #endif /* ! IPV6_V6ONLY */ 614 #else /* ! HAVE_INET6 */ 615 mhd_LOG_MSG (d, MHD_SC_IPV6_NOT_SUPPORTED_BY_BUILD, \ 616 "IPv6 is not supported by this MHD build or " \ 617 "by this platform"); 618 return MHD_SC_IPV6_NOT_SUPPORTED_BY_BUILD; 619 #endif /* ! HAVE_INET6 */ 620 break; 621 case MHD_AF_DUAL_v6_OPTIONAL: 622 mhd_assert (! force_v6_any_dual); 623 #ifdef HAVE_INET6 624 #ifdef HAVE_DCLR_IPV6_V6ONLY 625 sk_type = (! v6_tried) ? 
626 mhd_SKT_IP_V4_WITH_V6_OPT : mhd_SKT_IP_V4_ONLY; 627 #else /* ! IPV6_V6ONLY */ 628 mhd_assert (! v6_tried); 629 sk_type = mhd_SKT_IP_V4_ONLY; 630 #endif /* ! IPV6_V6ONLY */ 631 #else /* ! HAVE_INET6 */ 632 mhd_assert (! v6_tried); 633 sk_type = mhd_SKT_IP_V4_ONLY; 634 #endif /* ! HAVE_INET6 */ 635 break; 636 default: 637 mhd_LOG_MSG (d, MHD_SC_AF_NOT_SUPPORTED_BY_BUILD, \ 638 "Unknown address family specified"); 639 return MHD_SC_AF_NOT_SUPPORTED_BY_BUILD; 640 } 641 642 mhd_assert (mhd_SKT_NO_SOCKET < sk_type); 643 644 switch (sk_type) 645 { 646 case mhd_SKT_IP_V4_ONLY: 647 case mhd_SKT_IP_V4_WITH_FALLBACK: 648 /* Zeroing is not required, but may help on exotic platforms */ 649 memset (&(sa_all.sa_i4), 0, sizeof(sa_all.sa_i4)); 650 sa_all.sa_i4.sin_family = AF_INET; 651 sa_all.sa_i4.sin_port = htons (s->bind_port.v_port); 652 sa_all.sa_i4.sin_addr.s_addr = INADDR_ANY; 653 if (0 != INADDR_ANY) /* Optimised at compile time */ 654 sa_all.sa_i4.sin_addr.s_addr = htonl (INADDR_ANY); 655 #ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN 656 sa_all.sa_i4.sin_len = (uint8_t) sizeof (sa_all.sa_i4); 657 #endif 658 p_use_sa = (const struct sockaddr *) &(sa_all.sa_i4); 659 use_sa_size = (socklen_t) sizeof (sa_all.sa_i4); 660 break; 661 case mhd_SKT_IP_V6_ONLY: 662 case mhd_SKT_IP_DUAL_REQUIRED: 663 case mhd_SKT_IP_V4_WITH_V6_OPT: 664 case mhd_SKT_IP_V6_WITH_V4_OPT: 665 #ifdef HAVE_INET6 666 if (1) 667 { 668 #ifdef IN6ADDR_ANY_INIT 669 static const struct in6_addr static_in6any = IN6ADDR_ANY_INIT; 670 #endif 671 /* Zeroing is required by POSIX */ 672 memset (&(sa_all.sa_i6), 0, sizeof(sa_all.sa_i6)); 673 sa_all.sa_i6.sin6_family = AF_INET6; 674 sa_all.sa_i6.sin6_port = htons (s->bind_port.v_port); 675 #ifdef IN6ADDR_ANY_INIT /* Optional assignment at the address is all zeros anyway */ 676 sa_all.sa_i6.sin6_addr = static_in6any; 677 #endif 678 #ifdef HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN 679 sa_all.sa_i6.sin6_len = (uint8_t) sizeof (sa_all.sa_i6); 680 #endif 681 p_use_sa = (const struct sockaddr *) &(sa_all.sa_i6); 682 use_sa_size = (socklen_t) sizeof (sa_all.sa_i6); 683 } 684 break; 685 #endif /* HAVE_INET6 */ 686 case mhd_SKT_UNKNOWN: 687 case mhd_SKT_NON_IP: 688 case mhd_SKT_UNIX: 689 case mhd_SKT_NO_SOCKET: 690 default: 691 mhd_UNREACHABLE (); 692 return MHD_SC_INTERNAL_ERROR; 693 } 694 695 sk_port = s->bind_port.v_port; 696 697 } 698 } 699 else 700 { 701 /* No listen socket */ 702 d->net.listen.fd = MHD_INVALID_SOCKET; 703 d->net.listen.is_broken = false; 704 d->net.listen.type = mhd_SOCKET_TYPE_UNKNOWN; 705 d->net.listen.non_block = false; 706 d->net.listen.port = 0; 707 708 return MHD_SC_OK; 709 } 710 711 mhd_assert (mhd_SKT_NO_SOCKET != sk_type); 712 mhd_assert ((NULL != p_use_sa) || sk_already_listening); 713 mhd_assert ((MHD_INVALID_SOCKET == sk) || sk_already_listening); 714 715 if (MHD_INVALID_SOCKET == sk) 716 { 717 mhd_assert (NULL != p_use_sa); 718 #if defined(MHD_SOCKETS_KIND_WINSOCK) && defined(WSA_FLAG_NO_HANDLE_INHERIT) 719 /* May fail before Win7 SP1 */ 720 sk = WSASocketW (p_use_sa->sa_family, SOCK_STREAM, 0, 721 NULL, 0, WSA_FLAG_OVERLAPPED | WSA_FLAG_NO_HANDLE_INHERIT); 722 723 if (MHD_INVALID_SOCKET == sk) 724 #endif /* MHD_SOCKETS_KIND_WINSOCK && WSA_FLAG_NO_HANDLE_INHERIT */ 725 sk = socket (p_use_sa->sa_family, 726 SOCK_STREAM | mhd_SOCK_NONBLOCK 727 | mhd_SOCK_CLOEXEC | mhd_SOCK_NOSIGPIPE, 0); 728 729 if (MHD_INVALID_SOCKET == sk) 730 { 731 #ifdef HAVE_INET6 732 if (mhd_SKT_IP_V4_WITH_FALLBACK == sk_type) 733 return create_bind_listen_stream_socket_inner (d, 734 s, 735 v6_tried, 736 
true, 737 prev_bnd_lstn_err); 738 if (mhd_SKT_IP_V4_WITH_V6_OPT == sk_type) 739 return create_bind_listen_stream_socket_inner (d, 740 s, 741 true, 742 false, 743 prev_bnd_lstn_err); 744 #endif /* HAVE_INET6 */ 745 746 if (MHD_SC_OK != prev_bnd_lstn_err) 747 return prev_bnd_lstn_err; 748 749 if (mhd_SCKT_LERR_IS_AF ()) 750 { 751 mhd_LOG_MSG (d, MHD_SC_AF_NOT_AVAILABLE, \ 752 "The requested socket address family is rejected " \ 753 "by the OS"); 754 return MHD_SC_AF_NOT_AVAILABLE; 755 } 756 mhd_LOG_MSG (d, MHD_SC_FAILED_TO_OPEN_LISTEN_SOCKET, \ 757 "Failed to open listen socket"); 758 759 return MHD_SC_FAILED_TO_OPEN_LISTEN_SOCKET; 760 } 761 is_non_block = (0 != mhd_SOCK_NONBLOCK); 762 is_non_inhr = (0 != mhd_SOCK_CLOEXEC); 763 } 764 else 765 { 766 is_non_block = false; /* Try to set non-block */ 767 is_non_inhr = false; /* Try to set non-inheritable */ 768 } 769 770 /* The listen socket must be closed if error code returned 771 beyond this point */ 772 773 ret = MHD_SC_OK; 774 775 do 776 { /* The scope for automatic socket close for error returns */ 777 if (! mhd_FD_FITS_DAEMON (d,sk)) 778 { 779 mhd_LOG_MSG (d, MHD_SC_LISTEN_FD_OUTSIDE_OF_SET_RANGE, \ 780 "The listen FD value is higher than allowed"); 781 ret = MHD_SC_LISTEN_FD_OUTSIDE_OF_SET_RANGE; 782 break; 783 } 784 785 if (! is_non_inhr) 786 { 787 if (! mhd_socket_noninheritable (sk)) 788 mhd_LOG_MSG (d, MHD_SC_LISTEN_SOCKET_NOINHERIT_FAILED, \ 789 "OS refused to make the listen socket non-inheritable"); 790 } 791 792 if (! sk_already_listening) 793 { 794 #ifdef HAVE_INET6 795 #ifdef HAVE_DCLR_IPV6_V6ONLY 796 if ((mhd_SKT_IP_V6_ONLY == sk_type) || 797 (mhd_SKT_IP_DUAL_REQUIRED == sk_type) || 798 (mhd_SKT_IP_V4_WITH_V6_OPT == sk_type) || 799 (mhd_SKT_IP_V6_WITH_V4_OPT == sk_type) || 800 (mhd_SKT_UNKNOWN == sk_type)) 801 { 802 mhd_SCKT_OPT_BOOL no_dual_to_set; 803 bool use_dual; 804 805 use_dual = ((mhd_SKT_IP_DUAL_REQUIRED == sk_type) || 806 (mhd_SKT_IP_V4_WITH_V6_OPT == sk_type) || 807 (mhd_SKT_IP_V6_WITH_V4_OPT == sk_type)); 808 no_dual_to_set = use_dual ? 0 : 1; 809 810 if (0 != mhd_setsockopt (sk, IPPROTO_IPV6, IPV6_V6ONLY, 811 (void *) &no_dual_to_set, 812 sizeof (no_dual_to_set))) 813 { 814 mhd_SCKT_OPT_BOOL no_dual_current; 815 socklen_t opt_size; 816 bool state_unknown; 817 bool state_match; 818 819 no_dual_current = 0; 820 opt_size = sizeof(no_dual_current); 821 822 /* Some platforms forbid setting this options, but allow 823 reading. */ 824 if ((0 != mhd_getsockopt (sk, IPPROTO_IPV6, IPV6_V6ONLY, 825 (void*) &no_dual_current, &opt_size)) 826 || (((socklen_t) sizeof(no_dual_current)) < opt_size)) 827 { 828 state_unknown = true; 829 state_match = false; 830 } 831 else 832 { 833 state_unknown = false; 834 state_match = ((! ! no_dual_current) == (! ! no_dual_to_set)); 835 } 836 837 if (state_unknown || ! state_match) 838 { 839 if (mhd_SKT_IP_V4_WITH_V6_OPT == sk_type) 840 { 841 (void) mhd_socket_close (sk); 842 return create_bind_listen_stream_socket_inner (d, 843 s, 844 true, 845 false, 846 prev_bnd_lstn_err); 847 } 848 if (! state_unknown) 849 { 850 /* The dual-stack state is definitely wrong */ 851 if (mhd_SKT_IP_V6_ONLY == sk_type) 852 { 853 mhd_LOG_MSG ( \ 854 d, MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_REJECTED, \ 855 "Failed to disable IP dual-stack configuration " \ 856 "for the listen socket"); 857 ret = (MHD_SC_OK == prev_bnd_lstn_err) ? 
858 MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_REJECTED : 859 prev_bnd_lstn_err; 860 break; 861 } 862 else if (mhd_SKT_UNKNOWN != sk_type) 863 { 864 mhd_LOG_MSG ( \ 865 d, MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_REJECTED, \ 866 "Cannot enable IP dual-stack configuration " \ 867 "for the listen socket"); 868 if (mhd_SKT_IP_DUAL_REQUIRED == sk_type) 869 { 870 ret = (MHD_SC_OK == prev_bnd_lstn_err) ? 871 MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_REJECTED : 872 prev_bnd_lstn_err; 873 break; 874 } 875 } 876 } 877 else 878 { 879 /* The dual-stack state is unknown */ 880 if (mhd_SKT_UNKNOWN != sk_type) 881 mhd_LOG_MSG ( 882 d, MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_UNKNOWN, \ 883 "Failed to set dual-stack (IPV6_ONLY) configuration " \ 884 "for the listen socket, using system defaults"); 885 } 886 } 887 } 888 } 889 #else /* ! IPV6_V6ONLY */ 890 mhd_assert (mhd_SKT_IP_DUAL_REQUIRED != sk_type); 891 mhd_assert (mhd_SKT_IP_V4_WITH_V6_OPT != sk_type); 892 mhd_assert (mhd_SKT_IP_V6_WITH_V4_OPT != sk_type); 893 #endif /* ! IPV6_V6ONLY */ 894 #endif /* HAVE_INET6 */ 895 896 if (MHD_FOM_AUTO <= d->settings->tcp_fastopen.v_option) 897 { 898 #if defined(HAVE_DCLR_TCP_FASTOPEN) 899 int fo_param; 900 #ifdef __linux__ 901 /* The parameter is the queue length */ 902 fo_param = (int) d->settings->tcp_fastopen.v_queue_length; 903 if (0 == fo_param) 904 fo_param = MHD_TCP_FASTOPEN_DEF_QUEUE_LEN; 905 #else /* ! __linux__ */ 906 fo_param = 1; /* The parameter is on/off type of setting */ 907 #endif /* ! __linux__ */ 908 if (0 != mhd_setsockopt (sk, IPPROTO_TCP, TCP_FASTOPEN, 909 (const void *) &fo_param, 910 sizeof (fo_param))) 911 { 912 mhd_LOG_MSG (d, MHD_SC_LISTEN_FAST_OPEN_FAILURE, \ 913 "OS refused to enable TCP Fast Open on " \ 914 "the listen socket"); 915 if (MHD_FOM_AUTO < d->settings->tcp_fastopen.v_option) 916 { 917 ret = MHD_SC_LISTEN_FAST_OPEN_FAILURE; 918 break; 919 } 920 } 921 #else /* ! TCP_FASTOPEN */ 922 if (MHD_FOM_AUTO < d->settings->tcp_fastopen.v_option) 923 { 924 mhd_LOG_MSG (d, MHD_SC_LISTEN_FAST_OPEN_FAILURE, \ 925 "The OS does not support TCP Fast Open"); 926 ret = MHD_SC_LISTEN_FAST_OPEN_FAILURE; 927 break; 928 } 929 #endif 930 } 931 932 if (MHD_D_OPTION_BIND_TYPE_NOT_SHARED >= d->settings->listen_addr_reuse) 933 { 934 #ifndef MHD_SOCKETS_KIND_WINSOCK 935 #ifdef HAVE_DCLR_SO_REUSEADDR 936 mhd_SCKT_OPT_BOOL on_val1 = 1; 937 if (0 != mhd_setsockopt (sk, SOL_SOCKET, SO_REUSEADDR, 938 (const void *) &on_val1, sizeof (on_val1))) 939 { 940 mhd_LOG_MSG (d, MHD_SC_LISTEN_PORT_REUSE_ENABLE_FAILED, \ 941 "OS refused to enable address reuse on " \ 942 "the listen socket"); 943 } 944 #else /* ! SO_REUSEADDR */ 945 mhd_LOG_MSG (d, MHD_SC_LISTEN_ADDRESS_REUSE_ENABLE_NOT_SUPPORTED, \ 946 "The OS does not support address reuse for sockets"); 947 #endif /* ! SO_REUSEADDR */ 948 #endif /* ! MHD_SOCKETS_KIND_WINSOCK */ 949 if (MHD_D_OPTION_BIND_TYPE_NOT_SHARED > d->settings->listen_addr_reuse) 950 { 951 #if defined(HAVE_DCLR_SO_REUSEPORT) || defined(MHD_SOCKETS_KIND_WINSOCK) 952 int opt_name; 953 mhd_SCKT_OPT_BOOL on_val2 = 1; 954 #ifndef MHD_SOCKETS_KIND_WINSOCK 955 opt_name = SO_REUSEPORT; 956 #else /* ! MHD_SOCKETS_KIND_WINSOCK */ 957 opt_name = SO_REUSEADDR; /* On W32 it is the same as SO_REUSEPORT on other platforms */ 958 #endif /* ! 
MHD_SOCKETS_KIND_WINSOCK */ 959 if (0 != mhd_setsockopt (sk, \ 960 SOL_SOCKET, \ 961 opt_name, \ 962 (const void *) &on_val2, \ 963 sizeof (on_val2))) 964 { 965 mhd_LOG_MSG (d, MHD_SC_LISTEN_ADDRESS_REUSE_ENABLE_FAILED, \ 966 "OS refused to enable address sharing " \ 967 "on the listen socket"); 968 ret = MHD_SC_LISTEN_ADDRESS_REUSE_ENABLE_FAILED; 969 break; 970 } 971 #else /* ! SO_REUSEADDR && ! MHD_SOCKETS_KIND_WINSOCK */ 972 mhd_LOG_MSG (d, MHD_SC_LISTEN_ADDRESS_REUSE_ENABLE_NOT_SUPPORTED, \ 973 "The OS does not support address sharing for sockets"); 974 ret = MHD_SC_LISTEN_ADDRESS_REUSE_ENABLE_NOT_SUPPORTED; 975 break; 976 #endif /* ! SO_REUSEADDR && ! MHD_SOCKETS_KIND_WINSOCK */ 977 } 978 } 979 #if defined(SO_EXCLUSIVEADDRUSE) || defined(SO_EXCLBIND) 980 else if (MHD_D_OPTION_BIND_TYPE_EXCLUSIVE <= 981 d->settings->listen_addr_reuse) 982 { 983 int opt_name; 984 mhd_SCKT_OPT_BOOL on_val = 1; 985 #ifdef SO_EXCLUSIVEADDRUSE 986 opt_name = SO_EXCLUSIVEADDRUSE; 987 #else 988 opt_name = SO_EXCLBIND; 989 #endif 990 if (0 != mhd_setsockopt (sk, \ 991 SOL_SOCKET, \ 992 opt_name, \ 993 (const void *) &on_val, \ 994 sizeof (on_val))) 995 { 996 mhd_LOG_MSG (d, MHD_SC_LISTEN_ADDRESS_EXCLUSIVE_ENABLE_FAILED, \ 997 "OS refused to enable exclusive address use " \ 998 "on the listen socket"); 999 ret = MHD_SC_LISTEN_ADDRESS_EXCLUSIVE_ENABLE_FAILED; 1000 break; 1001 } 1002 } 1003 #endif /* SO_EXCLUSIVEADDRUSE || SO_EXCLBIND */ 1004 1005 mhd_assert (NULL != p_use_sa); 1006 mhd_assert (0 != use_sa_size); 1007 if (0 != bind (sk, p_use_sa, use_sa_size)) 1008 { 1009 ret = (MHD_SC_OK == prev_bnd_lstn_err) ? 1010 MHD_SC_LISTEN_SOCKET_BIND_FAILED : prev_bnd_lstn_err; 1011 #ifdef HAVE_INET6 1012 if (mhd_SKT_IP_V4_WITH_FALLBACK == sk_type) 1013 { 1014 (void) mhd_socket_close (sk); 1015 return create_bind_listen_stream_socket_inner (d, 1016 s, 1017 v6_tried, 1018 true, 1019 ret); 1020 } 1021 if (mhd_SKT_IP_V4_WITH_V6_OPT == sk_type) 1022 { 1023 (void) mhd_socket_close (sk); 1024 return create_bind_listen_stream_socket_inner (d, 1025 s, 1026 true, 1027 false, 1028 ret); 1029 } 1030 #endif /* HAVE_INET6 */ 1031 break; 1032 } 1033 1034 if (1) 1035 { 1036 int accept_queue_len; 1037 accept_queue_len = (int) s->listen_backlog; 1038 if (0 > accept_queue_len) 1039 accept_queue_len = 0; 1040 if (0 == accept_queue_len) 1041 { 1042 #if defined(SOMAXCONN) || defined(HAVE_DCLR_SOMAXCONN) 1043 accept_queue_len = SOMAXCONN; 1044 #else /* ! SOMAXCONN */ 1045 accept_queue_len = 127; /* Should be the safe value */ 1046 #endif /* ! SOMAXCONN */ 1047 } 1048 if (0 != listen (sk, accept_queue_len)) 1049 { 1050 ret = MHD_SC_LISTEN_FAILURE; 1051 #ifdef HAVE_INET6 1052 if (mhd_SKT_IP_V4_WITH_FALLBACK == sk_type) 1053 { 1054 (void) mhd_socket_close (sk); 1055 return create_bind_listen_stream_socket_inner (d, 1056 s, 1057 v6_tried, 1058 true, 1059 ret); 1060 } 1061 if (mhd_SKT_IP_V4_WITH_V6_OPT == sk_type) 1062 { 1063 (void) mhd_socket_close (sk); 1064 return create_bind_listen_stream_socket_inner (d, 1065 s, 1066 true, 1067 false, 1068 ret); 1069 } 1070 #endif /* HAVE_INET6 */ 1071 break; 1072 } 1073 } 1074 } 1075 /* A valid listening socket is ready here */ 1076 1077 if (! is_non_block) 1078 { 1079 is_non_block = mhd_socket_nonblocking (sk); 1080 if (! 
is_non_block) 1081 mhd_LOG_MSG (d, MHD_SC_LISTEN_SOCKET_NONBLOCKING_FAILURE, \ 1082 "OS refused to make the listen socket non-blocking"); 1083 } 1084 1085 /* Set to the daemon only when the listening socket is fully ready */ 1086 d->net.listen.fd = sk; 1087 d->net.listen.is_broken = false; 1088 switch (sk_type) 1089 { 1090 case mhd_SKT_UNKNOWN: 1091 d->net.listen.type = mhd_SOCKET_TYPE_UNKNOWN; 1092 break; 1093 case mhd_SKT_NON_IP: 1094 d->net.listen.type = mhd_SOCKET_TYPE_NON_IP; 1095 break; 1096 case mhd_SKT_UNIX: 1097 d->net.listen.type = mhd_SOCKET_TYPE_UNIX; 1098 break; 1099 case mhd_SKT_IP_V4_ONLY: 1100 case mhd_SKT_IP_V6_ONLY: 1101 case mhd_SKT_IP_DUAL_REQUIRED: 1102 case mhd_SKT_IP_V4_WITH_V6_OPT: 1103 case mhd_SKT_IP_V6_WITH_V4_OPT: 1104 case mhd_SKT_IP_V4_WITH_FALLBACK: 1105 d->net.listen.type = mhd_SOCKET_TYPE_IP; 1106 break; 1107 case mhd_SKT_NO_SOCKET: 1108 default: 1109 mhd_UNREACHABLE (); 1110 return MHD_SC_INTERNAL_ERROR; 1111 } 1112 d->net.listen.non_block = is_non_block; 1113 d->net.listen.port = sk_port; 1114 1115 mhd_assert (ret == MHD_SC_OK); 1116 1117 return MHD_SC_OK; 1118 1119 } while (0); 1120 1121 mhd_assert (MHD_SC_OK != ret); /* This should be only error returns here */ 1122 mhd_assert (MHD_INVALID_SOCKET != sk); 1123 (void) mhd_socket_close (sk); 1124 return ret; 1125 } 1126 1127 1128 /** 1129 * Create socket, bind to the address and start listening on the socket. 1130 * 1131 * The socket is assigned to the daemon as listening FD. 1132 * 1133 * @param d the daemon to use 1134 * @param s the user settings 1135 * @return #MHD_SC_OK on success, 1136 * the error code otherwise (no error printed to log if result is 1137 * #MHD_SC_LISTEN_SOCKET_BIND_FAILED or #MHD_SC_LISTEN_FAILURE) 1138 */ 1139 static enum MHD_StatusCode 1140 create_bind_listen_stream_socket (struct MHD_Daemon *restrict d, 1141 struct DaemonOptions *restrict s) 1142 { 1143 enum MHD_StatusCode ret; 1144 1145 ret = create_bind_listen_stream_socket_inner (d, 1146 s, 1147 false, 1148 false, 1149 MHD_SC_OK); 1150 #ifdef MHD_SUPPORT_LOG_FUNCTIONALITY 1151 if (MHD_SC_LISTEN_SOCKET_BIND_FAILED == ret) 1152 mhd_LOG_MSG (d, MHD_SC_LISTEN_SOCKET_BIND_FAILED, \ 1153 "Failed to bind the listen socket"); 1154 else if (MHD_SC_LISTEN_FAILURE == ret) 1155 mhd_LOG_MSG (d, MHD_SC_LISTEN_FAILURE, \ 1156 "Failed to start listening on the listen socket"); 1157 #endif /* MHD_SUPPORT_LOG_FUNCTIONALITY */ 1158 1159 return ret; 1160 } 1161 1162 1163 #ifdef MHD_USE_GETSOCKNAME 1164 /** 1165 * Detect and set the type and port of the listening socket 1166 * @param d the daemon to use 1167 */ 1168 static MHD_FN_PAR_NONNULL_ (1) void 1169 detect_listen_type_and_port (struct MHD_Daemon *restrict d) 1170 { 1171 union mhd_SockaddrAny sa_all; 1172 socklen_t sa_size; 1173 enum mhd_SocketType declared_type; 1174 1175 mhd_assert (MHD_INVALID_SOCKET != d->net.listen.fd); 1176 mhd_assert (0 == d->net.listen.port); 1177 memset (&sa_all, 0, sizeof(sa_all)); /* Actually not required */ 1178 sa_size = (socklen_t) sizeof(sa_all); 1179 1180 if (0 != getsockname (d->net.listen.fd, &(sa_all.sa), &sa_size)) 1181 { 1182 if (mhd_SOCKET_TYPE_IP == d->net.listen.type) 1183 mhd_LOG_MSG (d, MHD_SC_LISTEN_PORT_DETECT_FAILURE, \ 1184 "Failed to detect the port number on the listening socket"); 1185 return; 1186 } 1187 1188 declared_type = d->net.listen.type; 1189 if (0 == sa_size) 1190 { 1191 #ifndef __linux__ 1192 /* Used on some non-Linux platforms */ 1193 d->net.listen.type = mhd_SOCKET_TYPE_UNIX; 1194 d->net.listen.port = 0; 1195 #else /* ! 
__linux__ */ 1196 (void) 0; 1197 #endif /* ! __linux__ */ 1198 } 1199 else 1200 { 1201 switch (sa_all.sa.sa_family) 1202 { 1203 case AF_INET: 1204 d->net.listen.type = mhd_SOCKET_TYPE_IP; 1205 d->net.listen.port = (uint_least16_t) ntohs (sa_all.sa_i4.sin_port); 1206 break; 1207 #ifdef HAVE_INET6 1208 case AF_INET6: 1209 d->net.listen.type = mhd_SOCKET_TYPE_IP; 1210 d->net.listen.port = (uint_least16_t) ntohs (sa_all.sa_i6.sin6_port); 1211 break; 1212 #endif /* HAVE_INET6 */ 1213 #ifdef MHD_AF_UNIX 1214 case MHD_AF_UNIX: 1215 d->net.listen.type = mhd_SOCKET_TYPE_UNIX; 1216 d->net.listen.port = 0; 1217 break; 1218 #endif /* MHD_AF_UNIX */ 1219 default: 1220 d->net.listen.type = mhd_SOCKET_TYPE_UNKNOWN; 1221 d->net.listen.port = 0; 1222 break; 1223 } 1224 } 1225 1226 if ((declared_type != d->net.listen.type) 1227 && (mhd_SOCKET_TYPE_IP == declared_type)) 1228 mhd_LOG_MSG (d, MHD_SC_UNEXPECTED_SOCKET_ERROR, \ 1229 "The type of listen socket is detected as non-IP, while " \ 1230 "the socket has been created as an IP socket"); 1231 } 1232 1233 1234 #else 1235 # define detect_listen_type_and_port(d) ((void) d) 1236 #endif 1237 1238 1239 #ifdef MHD_SUPPORT_EPOLL 1240 1241 /** 1242 * Initialise daemon's epoll FD 1243 */ 1244 static MHD_FN_PAR_NONNULL_ (1) enum MHD_StatusCode 1245 init_epoll (struct MHD_Daemon *restrict d, 1246 bool log_failures) 1247 { 1248 int e_fd; 1249 mhd_assert (! mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int)); 1250 mhd_assert ((mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type) || \ 1251 ((mhd_POLL_TYPE_EPOLL == d->events.poll_type) && \ 1252 (mhd_WM_INT_IS_THREAD_POOL (d->wmode_int)))); 1253 mhd_assert ((! d->dbg.net_inited) || \ 1254 (mhd_WM_INT_IS_THREAD_POOL (d->wmode_int))); 1255 mhd_assert ((mhd_POLL_TYPE_EPOLL != d->events.poll_type) || \ 1256 (NULL == d->events.data.epoll.events)); 1257 mhd_assert ((mhd_POLL_TYPE_EPOLL != d->events.poll_type) || \ 1258 (MHD_INVALID_SOCKET == d->events.data.epoll.e_fd)); 1259 #ifdef HAVE_EPOLL_CREATE1 1260 e_fd = epoll_create1 (EPOLL_CLOEXEC); 1261 #else /* ! HAVE_EPOLL_CREATE1 */ 1262 e_fd = epoll_create (128); /* The number is usually ignored */ 1263 if (0 <= e_fd) 1264 { 1265 if (! mhd_socket_noninheritable (e_fd)) 1266 mhd_LOG_MSG (d, MHD_SC_EPOLL_CTL_CONFIGURE_NOINHERIT_FAILED, \ 1267 "Failed to make epoll control FD non-inheritable"); 1268 } 1269 #endif /* ! HAVE_EPOLL_CREATE1 */ 1270 if (0 > e_fd) 1271 { 1272 if (log_failures) 1273 mhd_LOG_MSG (d, MHD_SC_EPOLL_CTL_CREATE_FAILED, \ 1274 "Failed to create epoll control FD"); 1275 return MHD_SC_EPOLL_CTL_CREATE_FAILED; /* Failure exit point */ 1276 } 1277 1278 if (! mhd_FD_FITS_DAEMON (d, e_fd)) 1279 { 1280 if (log_failures) 1281 mhd_LOG_MSG (d, MHD_SC_EPOLL_CTL_OUTSIDE_OF_SET_RANGE, \ 1282 "The epoll control FD value is higher than allowed"); 1283 (void) close (e_fd); 1284 return MHD_SC_EPOLL_CTL_OUTSIDE_OF_SET_RANGE; /* Failure exit point */ 1285 } 1286 1287 d->events.poll_type = mhd_POLL_TYPE_EPOLL; 1288 d->events.data.epoll.e_fd = e_fd; 1289 d->events.data.epoll.events = NULL; /* Memory allocated during event and threads init */ 1290 d->events.data.epoll.num_elements = 0; 1291 return MHD_SC_OK; /* Success exit point */ 1292 } 1293 1294 1295 /** 1296 * Deinitialise daemon's epoll FD 1297 */ 1298 MHD_FN_PAR_NONNULL_ (1) static void 1299 deinit_epoll (struct MHD_Daemon *restrict d) 1300 { 1301 mhd_assert (mhd_POLL_TYPE_EPOLL == d->events.poll_type); 1302 /* With thread pool the epoll control FD could be migrated to the 1303 * first worker daemon. 
*/ 1304 mhd_assert ((MHD_INVALID_SOCKET != d->events.data.epoll.e_fd) || \ 1305 (mhd_WM_INT_IS_THREAD_POOL (d->wmode_int))); 1306 mhd_assert ((MHD_INVALID_SOCKET != d->events.data.epoll.e_fd) || \ 1307 (mhd_D_HAS_WORKERS (d))); 1308 if (MHD_INVALID_SOCKET != d->events.data.epoll.e_fd) 1309 close (d->events.data.epoll.e_fd); 1310 } 1311 1312 1313 #endif /* MHD_SUPPORT_EPOLL */ 1314 1315 /** 1316 * Choose sockets monitoring syscall and pre-initialise it 1317 * @param d the daemon object 1318 * @param s the user settings 1319 * @return #MHD_SC_OK on success, 1320 * the error code otherwise 1321 */ 1322 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) \ 1323 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 1324 daemon_choose_and_preinit_events (struct MHD_Daemon *restrict d, 1325 struct DaemonOptions *restrict s) 1326 { 1327 enum mhd_IntPollType chosen_type; 1328 1329 mhd_assert (mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type); 1330 1331 mhd_assert ((mhd_WM_INT_EXTERNAL_EVENTS_EDGE != d->wmode_int) || \ 1332 (mhd_WM_INT_EXTERNAL_EVENTS_LEVEL != d->wmode_int) || \ 1333 (MHD_SPS_AUTO == s->poll_syscall)); 1334 1335 mhd_assert ((mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type) || \ 1336 (mhd_WM_INT_EXTERNAL_EVENTS_EDGE == d->wmode_int) || \ 1337 (mhd_WM_INT_EXTERNAL_EVENTS_LEVEL == d->wmode_int) || \ 1338 (MHD_WM_EXTERNAL_SINGLE_FD_WATCH == s->work_mode.mode)); 1339 mhd_assert ((mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type) || \ 1340 (d->events.poll_type == (enum mhd_IntPollType) s->poll_syscall) \ 1341 || ((MHD_SPS_AUTO == s->poll_syscall) && \ 1342 ((mhd_POLL_TYPE_EXT == d->events.poll_type) || \ 1343 mhd_POLL_TYPE_INT_IS_EPOLL (d->events.poll_type)))); 1344 1345 /* Check whether the provided parameter is in the range of expected values. 1346 Reject unsupported or disabled values. */ 1347 switch (s->poll_syscall) 1348 { 1349 case MHD_SPS_AUTO: 1350 chosen_type = mhd_POLL_TYPE_NOT_SET_YET; 1351 break; 1352 case MHD_SPS_SELECT: 1353 mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int)); 1354 #ifndef MHD_SUPPORT_SELECT 1355 mhd_LOG_MSG (d, MHD_SC_SELECT_SYSCALL_NOT_AVAILABLE, \ 1356 "'select()' is not supported by the platform or " \ 1357 "this MHD build"); 1358 return MHD_SC_SELECT_SYSCALL_NOT_AVAILABLE; 1359 #else /* MHD_SUPPORT_SELECT */ 1360 chosen_type = mhd_POLL_TYPE_SELECT; 1361 #endif /* MHD_SUPPORT_SELECT */ 1362 break; 1363 case MHD_SPS_POLL: 1364 mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int)); 1365 #ifndef MHD_SUPPORT_POLL 1366 mhd_LOG_MSG (d, MHD_SC_POLL_SYSCALL_NOT_AVAILABLE, \ 1367 "'poll()' is not supported by the platform or " \ 1368 "this MHD build"); 1369 return MHD_SC_POLL_SYSCALL_NOT_AVAILABLE; 1370 #else /* MHD_SUPPORT_POLL */ 1371 chosen_type = mhd_POLL_TYPE_POLL; 1372 #endif /* MHD_SUPPORT_POLL */ 1373 break; 1374 case MHD_SPS_EPOLL: 1375 mhd_assert (! 
mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int)); 1376 #ifndef MHD_SUPPORT_EPOLL 1377 mhd_LOG_MSG (d, MHD_SC_EPOLL_SYSCALL_NOT_AVAILABLE, \ 1378 "'epoll' is not supported by the platform or " \ 1379 "this MHD build"); 1380 return MHD_SC_EPOLL_SYSCALL_NOT_AVAILABLE; 1381 #else /* MHD_SUPPORT_EPOLL */ 1382 chosen_type = mhd_POLL_TYPE_EPOLL; 1383 #endif /* MHD_SUPPORT_EPOLL */ 1384 break; 1385 default: 1386 mhd_LOG_MSG (d, MHD_SC_CONFIGURATION_UNEXPECTED_SPS, 1387 "Wrong socket polling syscall specified"); 1388 return MHD_SC_CONFIGURATION_UNEXPECTED_SPS; 1389 } 1390 1391 mhd_assert (mhd_POLL_TYPE_EXT != chosen_type); 1392 1393 if (mhd_POLL_TYPE_NOT_SET_YET == chosen_type) 1394 { 1395 if (mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int)) 1396 chosen_type = mhd_POLL_TYPE_EXT; 1397 } 1398 1399 #ifdef MHD_SUPPORT_EPOLL 1400 /* Try 'epoll' if needed or possible */ 1401 if ((mhd_POLL_TYPE_NOT_SET_YET == chosen_type) 1402 || (mhd_POLL_TYPE_EPOLL == chosen_type)) 1403 { 1404 bool epoll_required; 1405 bool epoll_allowed; 1406 1407 epoll_required = false; 1408 if (mhd_POLL_TYPE_EPOLL == chosen_type) 1409 { 1410 mhd_assert (! mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int)); 1411 epoll_required = true; 1412 } 1413 else if (MHD_WM_EXTERNAL_SINGLE_FD_WATCH == s->work_mode.mode) 1414 { 1415 mhd_assert (! mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int)); 1416 epoll_required = true; 1417 } 1418 1419 epoll_allowed = true; 1420 if (mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int)) 1421 { 1422 mhd_assert (! epoll_required); 1423 epoll_allowed = false; 1424 } 1425 # ifdef MHD_SUPPORT_HTTPS 1426 else if (MHD_TLS_BACKEND_NONE != s->tls) 1427 { 1428 if (! epoll_required) 1429 epoll_allowed = mhd_tls_is_edge_trigg_supported (s); 1430 /* If 'epoll' is required, but TLS backend does not support it, 1431 then continue with 'epoll' here and fail at TLS initialisation. */ 1432 /* TODO: fail here */ 1433 } 1434 # endif /* MHD_SUPPORT_HTTPS */ 1435 1436 mhd_assert (epoll_allowed || ! epoll_required); 1437 1438 if (epoll_allowed) 1439 { 1440 enum MHD_StatusCode epoll_res; 1441 1442 epoll_res = init_epoll (d, 1443 epoll_required); 1444 if (MHD_SC_OK == epoll_res) 1445 chosen_type = mhd_POLL_TYPE_EPOLL; 1446 else 1447 { 1448 if (epoll_required) 1449 return epoll_res; 1450 mhd_assert (mhd_POLL_TYPE_NOT_SET_YET == chosen_type); 1451 } 1452 } 1453 else 1454 mhd_assert (mhd_POLL_TYPE_EPOLL != chosen_type); 1455 } 1456 mhd_assert ((mhd_POLL_TYPE_EPOLL != d->events.poll_type) || \ 1457 (0 < d->events.data.epoll.e_fd)); 1458 mhd_assert ((mhd_POLL_TYPE_EPOLL == d->events.poll_type) == \ 1459 (mhd_POLL_TYPE_EPOLL == chosen_type)); 1460 #endif /* ! 
MHD_SUPPORT_EPOLL */ 1461 1462 if (mhd_POLL_TYPE_NOT_SET_YET == chosen_type) 1463 { 1464 #if defined(MHD_SUPPORT_POLL) 1465 chosen_type = mhd_POLL_TYPE_POLL; 1466 #elif defined(MHD_SUPPORT_SELECT) 1467 chosen_type = mhd_POLL_TYPE_SELECT; 1468 #else 1469 mhd_LOG_MSG (d, MHD_SC_FEATURE_DISABLED, \ 1470 "All suitable internal sockets polling technologies are " \ 1471 "disabled in this MHD build"); 1472 return MHD_SC_FEATURE_DISABLED; 1473 #endif 1474 } 1475 1476 switch (chosen_type) 1477 { 1478 case mhd_POLL_TYPE_EXT: 1479 mhd_assert ((MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL == s->work_mode.mode) || \ 1480 (MHD_WM_EXTERNAL_EVENT_LOOP_CB_EDGE == s->work_mode.mode)); 1481 mhd_assert (mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int)); 1482 d->events.poll_type = mhd_POLL_TYPE_EXT; 1483 d->events.data.extr.cb_data.cb = 1484 s->work_mode.params.v_external_event_loop_cb.reg_cb; 1485 d->events.data.extr.cb_data.cls = 1486 s->work_mode.params.v_external_event_loop_cb.reg_cb_cls; 1487 d->events.data.extr.reg_all = (MHD_NO != s->reregister_all); 1488 #ifdef MHD_SUPPORT_THREADS 1489 d->events.data.extr.itc_data.app_cntx = NULL; 1490 #endif /* MHD_SUPPORT_THREADS */ 1491 d->events.data.extr.listen_data.app_cntx = NULL; 1492 break; 1493 #ifdef MHD_SUPPORT_SELECT 1494 case mhd_POLL_TYPE_SELECT: 1495 mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int)); 1496 mhd_assert (MHD_WM_EXTERNAL_SINGLE_FD_WATCH != s->work_mode.mode); 1497 mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL != s->work_mode.mode); 1498 mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_EDGE != s->work_mode.mode); 1499 mhd_assert (MHD_NO == s->reregister_all); 1500 d->events.poll_type = mhd_POLL_TYPE_SELECT; 1501 d->events.data.select.rfds = NULL; /* Memory allocated during event and threads init */ 1502 d->events.data.select.wfds = NULL; /* Memory allocated during event and threads init */ 1503 d->events.data.select.efds = NULL; /* Memory allocated during event and threads init */ 1504 break; 1505 #endif /* MHD_SUPPORT_SELECT */ 1506 #ifdef MHD_SUPPORT_POLL 1507 case mhd_POLL_TYPE_POLL: 1508 mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int)); 1509 mhd_assert (MHD_WM_EXTERNAL_SINGLE_FD_WATCH != s->work_mode.mode); 1510 mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL != s->work_mode.mode); 1511 mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_EDGE != s->work_mode.mode); 1512 mhd_assert (MHD_NO == s->reregister_all); 1513 d->events.poll_type = mhd_POLL_TYPE_POLL; 1514 d->events.data.poll.fds = NULL; /* Memory allocated during event and threads init */ 1515 d->events.data.poll.rel = NULL; /* Memory allocated during event and threads init */ 1516 break; 1517 #endif /* MHD_SUPPORT_POLL */ 1518 #ifdef MHD_SUPPORT_EPOLL 1519 case mhd_POLL_TYPE_EPOLL: 1520 mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int)); 1521 mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL != s->work_mode.mode); 1522 mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_EDGE != s->work_mode.mode); 1523 mhd_assert (MHD_NO == s->reregister_all); 1524 /* Pre-initialised by init_epoll() */ 1525 mhd_assert (mhd_POLL_TYPE_EPOLL == d->events.poll_type); 1526 mhd_assert (0 <= d->events.data.epoll.e_fd); 1527 mhd_assert (NULL == d->events.data.epoll.events); 1528 break; 1529 #endif /* MHD_SUPPORT_EPOLL */ 1530 #ifndef MHD_SUPPORT_SELECT 1531 case mhd_POLL_TYPE_SELECT: 1532 #endif /* ! MHD_SUPPORT_SELECT */ 1533 #ifndef MHD_SUPPORT_POLL 1534 case mhd_POLL_TYPE_POLL: 1535 #endif /* ! 
MHD_SUPPORT_POLL */ 1536 case mhd_POLL_TYPE_NOT_SET_YET: 1537 default: 1538 mhd_UNREACHABLE (); 1539 return MHD_SC_INTERNAL_ERROR; 1540 } 1541 return MHD_SC_OK; 1542 } 1543 1544 1545 /** 1546 * Initialise network/sockets for the daemon. 1547 * Also choose events mode / sockets polling syscall. 1548 * @param d the daemon object 1549 * @param s the user settings 1550 * @return #MHD_SC_OK on success, 1551 * the error code otherwise 1552 */ 1553 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) \ 1554 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 1555 daemon_init_net (struct MHD_Daemon *restrict d, 1556 struct DaemonOptions *restrict s) 1557 { 1558 enum MHD_StatusCode ret; 1559 1560 mhd_assert (! d->dbg.net_inited); 1561 mhd_assert (! d->dbg.net_deinited); 1562 #ifdef MHD_SOCKETS_KIND_POSIX 1563 d->net.cfg.max_fd_num = s->fd_number_limit; 1564 #endif /* MHD_SOCKETS_KIND_POSIX */ 1565 1566 ret = daemon_choose_and_preinit_events (d, s); 1567 if (MHD_SC_OK != ret) 1568 return ret; 1569 1570 mhd_assert (mhd_POLL_TYPE_NOT_SET_YET != d->events.poll_type); 1571 1572 /* No direct return of error codes is allowed beyond this point. 1573 Deinit/cleanup must be performed before return of any error. */ 1574 1575 #if defined(MHD_SOCKETS_KIND_POSIX) && defined(MHD_SUPPORT_SELECT) 1576 if (mhd_POLL_TYPE_SELECT == d->events.poll_type) 1577 { 1578 if ((MHD_INVALID_SOCKET == d->net.cfg.max_fd_num) || 1579 (FD_SETSIZE < d->net.cfg.max_fd_num)) 1580 d->net.cfg.max_fd_num = FD_SETSIZE; 1581 } 1582 #endif /* MHD_SOCKETS_KIND_POSIX && MHD_SUPPORT_SELECT */ 1583 1584 if (MHD_SC_OK == ret) 1585 { 1586 ret = create_bind_listen_stream_socket (d, s); 1587 1588 if (MHD_SC_OK == ret) 1589 { 1590 if ((MHD_INVALID_SOCKET != d->net.listen.fd) 1591 && ! d->net.listen.non_block 1592 && (mhd_D_IS_USING_EDGE_TRIG (d) || 1593 mhd_WM_INT_IS_THREAD_POOL (d->wmode_int))) 1594 { 1595 mhd_LOG_MSG (d, MHD_SC_LISTEN_SOCKET_NONBLOCKING_FAILURE, \ 1596 "The selected daemon work mode requires listening socket " 1597 "in non-blocking mode"); 1598 ret = MHD_SC_LISTEN_SOCKET_NONBLOCKING_FAILURE; 1599 } 1600 1601 if (MHD_SC_OK == ret) 1602 { 1603 if ((MHD_INVALID_SOCKET != d->net.listen.fd) && 1604 ((0 == d->net.listen.port) || 1605 (mhd_SOCKET_TYPE_UNKNOWN == d->net.listen.type))) 1606 detect_listen_type_and_port (d); 1607 1608 #ifndef NDEBUG 1609 d->dbg.net_inited = true; 1610 #endif 1611 return MHD_SC_OK; /* Success exit point */ 1612 } 1613 1614 /* Below is a cleanup path */ 1615 if (MHD_INVALID_SOCKET != d->net.listen.fd) 1616 mhd_socket_close (d->net.listen.fd); 1617 } 1618 } 1619 1620 #ifdef MHD_SUPPORT_EPOLL 1621 if ((mhd_POLL_TYPE_EPOLL == d->events.poll_type)) 1622 close (d->events.data.epoll.e_fd); 1623 #endif /* MHD_SUPPORT_EPOLL */ 1624 1625 mhd_assert (MHD_SC_OK != ret); 1626 1627 return ret; 1628 } 1629 1630 1631 /** 1632 * Deinitialise daemon's network data 1633 * @param d the daemon object 1634 */ 1635 MHD_FN_PAR_NONNULL_ (1) static void 1636 daemon_deinit_net (struct MHD_Daemon *restrict d) 1637 { 1638 mhd_assert (d->dbg.net_inited); 1639 mhd_assert (! 
d->dbg.net_deinited); 1640 mhd_assert (mhd_POLL_TYPE_NOT_SET_YET != d->events.poll_type); 1641 #ifdef MHD_SUPPORT_EPOLL 1642 if (mhd_POLL_TYPE_EPOLL == d->events.poll_type) 1643 deinit_epoll (d); 1644 #endif /* MHD_SUPPORT_EPOLL */ 1645 if (MHD_INVALID_SOCKET != d->net.listen.fd) 1646 mhd_socket_close (d->net.listen.fd); 1647 1648 #ifndef NDEBUG 1649 d->dbg.net_deinited = true; 1650 #endif 1651 } 1652 1653 1654 #if 0 1655 void 1656 dauth_init (struct MHD_Daemon *restrict d, 1657 struct DaemonOptions *restrict s) 1658 { 1659 mhd_assert ((NULL == s->random_entropy.v_buf) || \ 1660 (0 != s->random_entropy.v_buf_size)); 1661 mhd_assert ((0 == s->random_entropy.v_buf_size) || \ 1662 (NULL != s->random_entropy.v_buf)); 1663 } 1664 1665 1666 #endif 1667 1668 #ifdef MHD_SUPPORT_AUTH_DIGEST 1669 /** 1670 * Initialise daemon Digest Auth data 1671 * @param d the daemon object 1672 * @param s the user settings 1673 * @return #MHD_SC_OK on success, 1674 * the error code otherwise 1675 */ 1676 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) \ 1677 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 1678 daemon_init_auth_digest (struct MHD_Daemon *restrict d, 1679 struct DaemonOptions *restrict s) 1680 { 1681 enum MHD_StatusCode ret; 1682 size_t nonces_num; 1683 1684 if (0 == s->random_entropy.v_buf_size) 1685 { 1686 /* No initialisation needed */ 1687 #ifndef HAVE_NULL_PTR_ALL_ZEROS 1688 d->auth_dg.entropy.data = NULL; 1689 d->auth_dg.nonces = NULL; 1690 #endif 1691 return MHD_SC_OK; 1692 } 1693 nonces_num = s->auth_digest_map_size; 1694 if (0 == nonces_num) 1695 nonces_num = 1000; 1696 d->auth_dg.nonces = (struct mhd_DaemonAuthDigestNonceData *) 1697 mhd_calloc (nonces_num, \ 1698 sizeof(struct mhd_DaemonAuthDigestNonceData)); 1699 if (NULL == d->auth_dg.nonces) 1700 { 1701 mhd_LOG_MSG (d, \ 1702 MHD_SC_DAEMON_MEM_ALLOC_FAILURE, \ 1703 "Failed to allocate memory for Digest Auth array"); 1704 return MHD_SC_DAEMON_MEM_ALLOC_FAILURE; 1705 } 1706 d->auth_dg.cfg.nonces_num = nonces_num; 1707 1708 if (! mhd_mutex_init (&(d->auth_dg.nonces_lock))) 1709 { 1710 mhd_LOG_MSG (d, MHD_SC_MUTEX_INIT_FAILURE, \ 1711 "Failed to initialise mutex for the Digest Auth data"); 1712 ret = MHD_SC_MUTEX_INIT_FAILURE; 1713 } 1714 else 1715 { 1716 if (! 
mhd_atomic_counter_init (&(d->auth_dg.num_gen_nonces), 0)) 1717 { 1718 mhd_LOG_MSG (d, MHD_SC_MUTEX_INIT_FAILURE, \ 1719 "Failed to initialise mutex for the Digest Auth data"); 1720 ret = MHD_SC_MUTEX_INIT_FAILURE; 1721 } 1722 else 1723 { 1724 /* Move ownership of the entropy buffer */ 1725 d->auth_dg.entropy.data = (char *) s->random_entropy.v_buf; 1726 d->auth_dg.entropy.size = s->random_entropy.v_buf_size; 1727 s->random_entropy.v_buf = NULL; 1728 s->random_entropy.v_buf_size = 0; 1729 1730 d->auth_dg.cfg.nonce_tmout = s->auth_digest_nonce_timeout; 1731 if (0 == d->auth_dg.cfg.nonce_tmout) 1732 d->auth_dg.cfg.nonce_tmout = MHD_AUTH_DIGEST_DEF_TIMEOUT; 1733 d->auth_dg.cfg.def_max_nc = s->auth_digest_def_max_nc; 1734 if (0 == d->auth_dg.cfg.def_max_nc) 1735 d->auth_dg.cfg.def_max_nc = MHD_AUTH_DIGEST_DEF_MAX_NC; 1736 1737 return MHD_SC_OK; /* Success exit point */ 1738 } 1739 mhd_mutex_destroy_chk (&(d->auth_dg.nonces_lock)); 1740 } 1741 1742 free (d->auth_dg.nonces); 1743 mhd_assert (MHD_SC_OK != ret); 1744 return ret; /* Failure exit point */ 1745 } 1746 1747 1748 /** 1749 * Deinitialise daemon Digest Auth data 1750 * @param d the daemon object 1751 */ 1752 MHD_FN_PAR_NONNULL_ (1) static void 1753 daemon_deinit_auth_digest (struct MHD_Daemon *restrict d) 1754 { 1755 if (0 == d->auth_dg.entropy.size) 1756 return; /* Digest Auth not used, nothing to deinitialise */ 1757 1758 mhd_assert (NULL != d->auth_dg.entropy.data); 1759 free (d->auth_dg.entropy.data); 1760 mhd_atomic_counter_deinit (&(d->auth_dg.num_gen_nonces)); 1761 mhd_mutex_destroy_chk (&(d->auth_dg.nonces_lock)); 1762 mhd_assert (NULL != d->auth_dg.nonces); 1763 free (d->auth_dg.nonces); 1764 } 1765 1766 1767 #else /* MHD_SUPPORT_AUTH_DIGEST */ 1768 #define daemon_init_auth_digest(d,s) (MHD_SC_OK) 1769 #define daemon_deinit_auth_digest(d) ((void) 0) 1770 #endif /* MHD_SUPPORT_AUTH_DIGEST */ 1771 1772 1773 /** 1774 * Initialise daemon TLS data 1775 * @param d the daemon object 1776 * @param s the user settings 1777 * @return #MHD_SC_OK on success, 1778 * the error code otherwise 1779 */ 1780 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) \ 1781 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 1782 daemon_init_tls (struct MHD_Daemon *restrict d, 1783 struct DaemonOptions *restrict s) 1784 { 1785 #ifdef MHD_SUPPORT_HTTPS 1786 mhd_StatusCodeInt ret; 1787 #endif /* MHD_SUPPORT_HTTPS */ 1788 1789 mhd_assert (! 
d->dbg.tls_inited); 1790 #ifdef MHD_SUPPORT_HTTPS 1791 d->tls = NULL; 1792 #endif 1793 1794 if (MHD_TLS_BACKEND_NONE == s->tls) 1795 { 1796 #ifndef NDEBUG 1797 d->dbg.tls_inited = true; 1798 #endif 1799 return MHD_SC_OK; 1800 } 1801 #ifndef MHD_SUPPORT_HTTPS 1802 mhd_LOG_MSG (d, \ 1803 MHD_SC_TLS_DISABLED, \ 1804 "HTTPS is not supported by this MHD build"); 1805 return MHD_SC_TLS_DISABLED; 1806 #else /* MHD_SUPPORT_HTTPS */ 1807 if (1) 1808 { 1809 enum mhd_TlsBackendAvailable tls_avail; 1810 1811 tls_avail = mhd_tls_is_backend_available (s); 1812 if (mhd_TLS_BACKEND_AVAIL_NOT_SUPPORTED == tls_avail) 1813 { 1814 mhd_LOG_MSG (d, \ 1815 MHD_SC_TLS_BACKEND_UNSUPPORTED, \ 1816 "The requested TLS backend is not supported " \ 1817 "by this MHD build"); 1818 return MHD_SC_TLS_BACKEND_UNSUPPORTED; 1819 } 1820 else if (mhd_TLS_BACKEND_AVAIL_NOT_AVAILABLE == tls_avail) 1821 { 1822 mhd_LOG_MSG (d, \ 1823 MHD_SC_TLS_BACKEND_UNAVAILABLE, \ 1824 "The requested TLS backend is not available"); 1825 return MHD_SC_TLS_BACKEND_UNAVAILABLE; 1826 } 1827 } 1828 ret = mhd_tls_daemon_init (d, 1829 mhd_D_HAS_EDGE_TRIGG (d), 1830 s, 1831 &(d->tls)); 1832 mhd_assert ((MHD_SC_OK == ret) || (NULL == d->tls)); 1833 mhd_assert ((MHD_SC_OK != ret) || (NULL != d->tls)); 1834 #ifndef NDEBUG 1835 d->dbg.tls_inited = (MHD_SC_OK == ret); 1836 #endif 1837 return (enum MHD_StatusCode) ret; 1838 #endif /* MHD_SUPPORT_HTTPS */ 1839 } 1840 1841 1842 /** 1843 * Deinitialise daemon TLS data 1844 * @param d the daemon object 1845 */ 1846 MHD_FN_PAR_NONNULL_ (1) static void 1847 daemon_deinit_tls (struct MHD_Daemon *restrict d) 1848 { 1849 mhd_assert (d->dbg.tls_inited); 1850 #ifdef MHD_SUPPORT_HTTPS 1851 if (NULL != d->tls) 1852 { 1853 mhd_tls_thread_cleanup (d->tls); 1854 mhd_tls_daemon_deinit (d->tls); 1855 } 1856 #elif defined(NDEBUG) 1857 (void) d; /* Mute compiler warning */ 1858 #endif 1859 } 1860 1861 1862 /** 1863 * Initialise large buffer tracking. 1864 * @param d the daemon object 1865 * @param s the user settings 1866 * @return #MHD_SC_OK on success, 1867 * the error code otherwise 1868 */ 1869 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) \ 1870 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 1871 daemon_init_large_buf (struct MHD_Daemon *restrict d, 1872 struct DaemonOptions *restrict s) 1873 { 1874 mhd_assert (! mhd_D_HAS_MASTER (d)); 1875 mhd_assert (0 != d->conns.cfg.count_limit); 1876 mhd_assert (0 != d->conns.cfg.mem_pool_size); 1877 1878 d->req_cfg.large_buf.space_left = s->large_pool_size; 1879 if (SIZE_MAX == d->req_cfg.large_buf.space_left) 1880 d->req_cfg.large_buf.space_left = 1881 (d->conns.cfg.count_limit * d->conns.cfg.mem_pool_size) / 32; /* Use ~3% of the maximum memory used by connections */ 1882 1883 #ifndef NDEBUG 1884 d->dbg.initial_lbuf_size = d->req_cfg.large_buf.space_left; 1885 #endif 1886 1887 if (! mhd_mutex_init_short (&(d->req_cfg.large_buf.lock))) 1888 { 1889 mhd_LOG_MSG (d, MHD_SC_MUTEX_INIT_FAILURE, \ 1890 "Failed to initialise mutex for the global large buffer."); 1891 return MHD_SC_MUTEX_INIT_FAILURE; 1892 } 1893 return MHD_SC_OK; 1894 } 1895 1896 1897 /** 1898 * Deinitialise large buffer tracking. 
1899 * @param d the daemon object 1900 */ 1901 static MHD_FN_PAR_NONNULL_ (1) void 1902 daemon_deinit_large_buf (struct MHD_Daemon *restrict d) 1903 { 1904 /* All large buffer allocations must be freed / deallocated earlier */ 1905 mhd_assert (d->dbg.initial_lbuf_size == d->req_cfg.large_buf.space_left); 1906 mhd_mutex_destroy_chk (&(d->req_cfg.large_buf.lock)); 1907 } 1908 1909 1910 /** 1911 * Finish initialisation of events processing 1912 * @param d the daemon object 1913 * @return #MHD_SC_OK on success, 1914 * the error code otherwise 1915 */ 1916 static MHD_FN_PAR_NONNULL_ (1) 1917 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 1918 allocate_events (struct MHD_Daemon *restrict d) 1919 { 1920 #if defined(MHD_SUPPORT_POLL) || defined(MHD_SUPPORT_EPOLL) 1921 /** 1922 * The number of elements to be monitored by sockets polling function 1923 */ 1924 unsigned int num_elements; 1925 num_elements = 0; 1926 #ifdef MHD_SUPPORT_THREADS 1927 ++num_elements; /* For the ITC */ 1928 #endif 1929 if (MHD_INVALID_SOCKET != d->net.listen.fd) 1930 ++num_elements; /* For the listening socket */ 1931 if (! mhd_D_HAS_THR_PER_CONN (d)) 1932 num_elements += d->conns.cfg.count_limit; 1933 #endif /* MHD_SUPPORT_POLL || MHD_SUPPORT_EPOLL */ 1934 1935 mhd_assert (0 != d->conns.cfg.count_limit); 1936 mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type)); 1937 1938 mhd_DLINKEDL_INIT_LIST (&(d->events),proc_ready); 1939 1940 switch (d->events.poll_type) 1941 { 1942 case mhd_POLL_TYPE_EXT: 1943 mhd_assert (NULL != d->events.data.extr.cb_data.cb); 1944 /* Nothing to do: allocation is not needed */ 1945 #ifndef NDEBUG 1946 d->dbg.events_allocated = true; 1947 #endif 1948 return MHD_SC_OK; /* Success exit point */ 1949 break; 1950 #ifdef MHD_SUPPORT_SELECT 1951 case mhd_POLL_TYPE_SELECT: 1952 /* The pointers have been set to NULL during pre-initialisations of the events */ 1953 mhd_assert (NULL == d->events.data.select.rfds); 1954 mhd_assert (NULL == d->events.data.select.wfds); 1955 mhd_assert (NULL == d->events.data.select.efds); 1956 d->events.data.select.rfds = (fd_set *) malloc (sizeof(fd_set)); 1957 if (NULL != d->events.data.select.rfds) 1958 { 1959 d->events.data.select.wfds = (fd_set *) malloc (sizeof(fd_set)); 1960 if (NULL != d->events.data.select.wfds) 1961 { 1962 d->events.data.select.efds = (fd_set *) malloc (sizeof(fd_set)); 1963 if (NULL != d->events.data.select.efds) 1964 { 1965 #ifndef NDEBUG 1966 d->dbg.num_events_elements = FD_SETSIZE; 1967 d->dbg.events_allocated = true; 1968 #endif 1969 return MHD_SC_OK; /* Success exit point */ 1970 } 1971 1972 free (d->events.data.select.wfds); 1973 } 1974 free (d->events.data.select.rfds); 1975 } 1976 mhd_LOG_MSG (d, MHD_SC_FD_SET_MEMORY_ALLOCATE_FAILURE, \ 1977 "Failed to allocate memory for fd_sets for the daemon"); 1978 return MHD_SC_FD_SET_MEMORY_ALLOCATE_FAILURE; 1979 break; 1980 #endif /* MHD_SUPPORT_SELECT */ 1981 #ifdef MHD_SUPPORT_POLL 1982 case mhd_POLL_TYPE_POLL: 1983 /* The pointers have been set to NULL during pre-initialisations of the events */ 1984 mhd_assert (NULL == d->events.data.poll.fds); 1985 mhd_assert (NULL == d->events.data.poll.rel); 1986 if ((num_elements > d->conns.cfg.count_limit) /* Check for value overflow */ 1987 || (mhd_D_HAS_THR_PER_CONN (d))) 1988 { 1989 d->events.data.poll.fds = 1990 (struct pollfd *) malloc (sizeof(struct pollfd) * num_elements); 1991 if (NULL != d->events.data.poll.fds) 1992 { 1993 d->events.data.poll.rel = 1994 (union mhd_SocketRelation *) malloc (sizeof(union mhd_SocketRelation) 1995 * 
num_elements); 1996 if (NULL != d->events.data.poll.rel) 1997 { 1998 #ifndef NDEBUG 1999 d->dbg.num_events_elements = num_elements; 2000 d->dbg.events_allocated = true; 2001 #endif 2002 return MHD_SC_OK; /* Success exit point */ 2003 } 2004 free (d->events.data.poll.fds); 2005 } 2006 } 2007 mhd_LOG_MSG (d, MHD_SC_POLL_FDS_MEMORY_ALLOCATE_FAILURE, \ 2008 "Failed to allocate memory for poll fds for the daemon"); 2009 return MHD_SC_POLL_FDS_MEMORY_ALLOCATE_FAILURE; 2010 break; 2011 #endif /* MHD_SUPPORT_POLL */ 2012 #ifdef MHD_SUPPORT_EPOLL 2013 case mhd_POLL_TYPE_EPOLL: 2014 mhd_assert (! mhd_D_HAS_THR_PER_CONN (d)); 2015 /* The event FD has been created during pre-initialisations of the events */ 2016 mhd_assert (MHD_INVALID_SOCKET != d->events.data.epoll.e_fd); 2017 /* The pointer has been set to NULL during pre-initialisations of the events */ 2018 mhd_assert (NULL == d->events.data.epoll.events); 2019 mhd_assert (0 == d->events.data.epoll.num_elements); 2020 if ((num_elements > d->conns.cfg.count_limit) /* Check for value overflow */ 2021 || (mhd_D_HAS_THR_PER_CONN (d))) 2022 { 2023 const unsigned int upper_limit = (sizeof(void*) >= 8) ? 4096u : 1024u; 2024 2025 mhd_assert (0 < (int) upper_limit); 2026 mhd_assert (upper_limit == (unsigned int) (size_t) upper_limit); 2027 2028 /* Trade a negligible performance penalty for memory saving */ 2029 /* Very large amounts of new events are processed in batches */ 2030 if (num_elements > upper_limit) 2031 num_elements = upper_limit; 2032 2033 d->events.data.epoll.events = 2034 (struct epoll_event *) malloc (sizeof(struct epoll_event) 2035 * num_elements); 2036 if (NULL != d->events.data.epoll.events) 2037 { 2038 d->events.data.epoll.num_elements = num_elements; 2039 #ifndef NDEBUG 2040 d->dbg.num_events_elements = num_elements; 2041 d->dbg.events_allocated = true; 2042 #endif 2043 return MHD_SC_OK; /* Success exit point */ 2044 } 2045 } 2046 mhd_LOG_MSG (d, MHD_SC_EPOLL_EVENTS_MEMORY_ALLOCATE_FAILURE, \ 2047 "Failed to allocate memory for epoll events for the daemon"); 2048 return MHD_SC_EPOLL_EVENTS_MEMORY_ALLOCATE_FAILURE; 2049 break; 2050 #endif /* MHD_SUPPORT_EPOLL */ 2051 #ifndef MHD_SUPPORT_SELECT 2052 case mhd_POLL_TYPE_SELECT: 2053 #endif /* ! MHD_SUPPORT_SELECT */ 2054 #ifndef MHD_SUPPORT_POLL 2055 case mhd_POLL_TYPE_POLL: 2056 #endif /* !
MHD_SUPPORT_POLL */ 2057 case mhd_POLL_TYPE_NOT_SET_YET: 2058 default: 2059 mhd_UNREACHABLE (); 2060 break; 2061 } 2062 mhd_UNREACHABLE (); 2063 return MHD_SC_INTERNAL_ERROR; 2064 } 2065 2066 2067 /** 2068 * Deallocate events data 2069 * @param d the daemon object 2070 */ 2071 static MHD_FN_PAR_NONNULL_ (1) void 2072 deallocate_events (struct MHD_Daemon *restrict d) 2073 { 2074 mhd_assert (0 != d->conns.cfg.count_limit); 2075 mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type)); 2076 if (mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type) 2077 { 2078 mhd_assert (0 && "Wrong workflow"); 2079 mhd_UNREACHABLE (); 2080 return; 2081 } 2082 #ifdef MHD_SUPPORT_SELECT 2083 else if (mhd_POLL_TYPE_SELECT == d->events.poll_type) 2084 { 2085 mhd_assert (NULL != d->events.data.select.efds); 2086 mhd_assert (NULL != d->events.data.select.wfds); 2087 mhd_assert (NULL != d->events.data.select.rfds); 2088 free (d->events.data.select.efds); 2089 free (d->events.data.select.wfds); 2090 free (d->events.data.select.rfds); 2091 } 2092 #endif /* MHD_SUPPORT_SELECT */ 2093 #ifdef MHD_SUPPORT_POLL 2094 else if (mhd_POLL_TYPE_POLL == d->events.poll_type) 2095 { 2096 mhd_assert (NULL != d->events.data.poll.rel); 2097 mhd_assert (NULL != d->events.data.poll.fds); 2098 free (d->events.data.poll.rel); 2099 free (d->events.data.poll.fds); 2100 } 2101 #endif /* MHD_SUPPORT_POLL */ 2102 #ifdef MHD_SUPPORT_EPOLL 2103 else if (mhd_POLL_TYPE_EPOLL == d->events.poll_type) 2104 { 2105 mhd_assert (0 != d->events.data.epoll.num_elements); 2106 mhd_assert (NULL != d->events.data.epoll.events); 2107 free (d->events.data.epoll.events); 2108 } 2109 #endif /* MHD_SUPPORT_EPOLL */ 2110 #ifndef NDEBUG 2111 d->dbg.events_allocated = false; 2112 #endif 2113 return; 2114 } 2115 2116 2117 /** 2118 * Initialise daemon's ITC 2119 * @param d the daemon object 2120 * @return #MHD_SC_OK on success, 2121 * the error code otherwise 2122 */ 2123 static MHD_FN_PAR_NONNULL_ (1) 2124 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 2125 init_itc (struct MHD_Daemon *restrict d) 2126 { 2127 mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type)); 2128 mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type)); 2129 #ifdef MHD_SUPPORT_THREADS 2130 // TODO: add and process "thread unsafe" daemon's option 2131 if (! mhd_itc_init (&(d->threading.itc))) 2132 { 2133 #if defined(MHD_ITC_EVENTFD_) 2134 mhd_LOG_MSG ( \ 2135 d, MHD_SC_ITC_INITIALIZATION_FAILED, \ 2136 "Failed to initialise eventFD for inter-thread communication"); 2137 #elif defined(MHD_ITC_PIPE_) 2138 mhd_LOG_MSG ( \ 2139 d, MHD_SC_ITC_INITIALIZATION_FAILED, \ 2140 "Failed to create a pipe for inter-thread communication"); 2141 #elif defined(MHD_ITC_SOCKETPAIR_) 2142 mhd_LOG_MSG ( \ 2143 d, MHD_SC_ITC_INITIALIZATION_FAILED, \ 2144 "Failed to create a socketpair for inter-thread communication"); 2145 #else 2146 #warning Missing explicit handling of the ITC type 2147 mhd_LOG_MSG ( \ 2148 d, MHD_SC_ITC_INITIALIZATION_FAILED, \ 2149 "Failed to initialise inter-thread communication"); 2150 #endif 2151 return MHD_SC_ITC_INITIALIZATION_FAILED; 2152 } 2153 if (! mhd_FD_FITS_DAEMON (d,mhd_itc_r_fd (d->threading.itc))) 2154 { 2155 mhd_LOG_MSG (d, MHD_SC_ITC_FD_OUTSIDE_OF_SET_RANGE, \ 2156 "The inter-thread communication FD value is " \ 2157 "higher than allowed"); 2158 (void) mhd_itc_destroy (d->threading.itc); 2159 mhd_itc_set_invalid (&(d->threading.itc)); 2160 return MHD_SC_ITC_FD_OUTSIDE_OF_SET_RANGE; 2161 } 2162 #else /* !
MHD_SUPPORT_THREADS */ 2163 (void) d; /* Unused */ 2164 #endif /* ! MHD_SUPPORT_THREADS */ 2165 return MHD_SC_OK; 2166 } 2167 2168 2169 /** 2170 * Deinitialise daemon's ITC 2171 * @param d the daemon object 2172 */ 2173 static MHD_FN_PAR_NONNULL_ (1) void 2174 deinit_itc (struct MHD_Daemon *restrict d) 2175 { 2176 mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type)); 2177 mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type)); 2178 #ifdef MHD_SUPPORT_THREADS 2179 // TODO: add and process "thread unsafe" daemon's option 2180 mhd_assert (! mhd_ITC_IS_INVALID (d->threading.itc)); 2181 (void) mhd_itc_destroy (d->threading.itc); 2182 #else /* ! MHD_SUPPORT_THREADS */ 2183 (void) d; /* Unused */ 2184 #endif /* ! MHD_SUPPORT_THREADS */ 2185 } 2186 2187 2188 /** 2189 * The final part of events initialisation: pre-add ITC and listening FD to 2190 * the monitored items (if supported by monitoring syscall). 2191 * @param d the daemon object 2192 * @return #MHD_SC_OK on success, 2193 * the error code otherwise 2194 */ 2195 static MHD_FN_PAR_NONNULL_ (1) 2196 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 2197 init_daemon_fds_monitoring (struct MHD_Daemon *restrict d) 2198 { 2199 mhd_assert (d->dbg.net_inited); 2200 mhd_assert (! d->dbg.net_deinited); 2201 mhd_assert (d->dbg.events_allocated); 2202 mhd_assert (! d->dbg.events_fully_inited); 2203 mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type)); 2204 #ifdef MHD_SUPPORT_THREADS 2205 mhd_assert (mhd_ITC_IS_VALID (d->threading.itc)); 2206 #endif 2207 2208 d->events.accept_pending = false; 2209 2210 switch (d->events.poll_type) 2211 { 2212 case mhd_POLL_TYPE_EXT: 2213 mhd_assert (NULL != d->events.data.extr.cb_data.cb); 2214 #ifdef MHD_SUPPORT_THREADS 2215 d->events.data.extr.itc_data.is_active = false; 2216 d->events.data.extr.itc_data.is_broken = false; 2217 #endif /* MHD_SUPPORT_THREADS */ 2218 if (! d->events.data.extr.reg_all) 2219 { 2220 bool itc_reg_succeed; 2221 2222 /* Register daemon's FDs now */ 2223 #ifdef MHD_SUPPORT_THREADS 2224 d->events.data.extr.itc_data.app_cntx = 2225 mhd_daemon_extr_event_reg (d, 2226 mhd_itc_r_fd (d->threading.itc), 2227 MHD_FD_STATE_RECV_EXCEPT, 2228 NULL, 2229 (struct MHD_EventUpdateContext *) 2230 mhd_SOCKET_REL_MARKER_ITC); 2231 itc_reg_succeed = (NULL != d->events.data.extr.itc_data.app_cntx); 2232 #else /* ! MHD_SUPPORT_THREADS */ 2233 itc_reg_succeed = true; 2234 #endif /* !
MHD_SUPPORT_THREADS */ 2235 if (itc_reg_succeed) 2236 { 2237 if (MHD_INVALID_SOCKET == d->net.listen.fd) 2238 { 2239 d->events.data.extr.listen_data.app_cntx = NULL; 2240 return MHD_SC_OK; /* Success exit point */ 2241 } 2242 2243 /* Need to register the listen FD */ 2244 d->events.data.extr.listen_data.app_cntx = 2245 mhd_daemon_extr_event_reg (d, 2246 d->net.listen.fd, 2247 MHD_FD_STATE_RECV_EXCEPT, 2248 NULL, 2249 (struct MHD_EventUpdateContext *) 2250 mhd_SOCKET_REL_MARKER_LISTEN); 2251 if (NULL != d->events.data.extr.listen_data.app_cntx) 2252 return MHD_SC_OK; /* Success exit point */ 2253 2254 /* Below is a clean-up path for 'case mhd_POLL_TYPE_EXT:' */ 2255 #ifdef MHD_SUPPORT_THREADS 2256 /* De-register ITC FD */ 2257 (void) mhd_daemon_extr_event_reg (d, 2258 mhd_itc_r_fd (d->threading.itc), 2259 MHD_FD_STATE_NONE, 2260 d->events.data.extr.itc_data.app_cntx, 2261 (struct MHD_EventUpdateContext *) 2262 mhd_SOCKET_REL_MARKER_ITC); 2263 d->events.data.extr.itc_data.app_cntx = NULL; 2264 #endif /* MHD_SUPPORT_THREADS */ 2265 } 2266 2267 mhd_LOG_MSG (d, MHD_SC_EXT_EVENT_REG_DAEMON_FDS_FAILURE, \ 2268 "Failed to register daemon FDs in the application " 2269 "(external events) monitoring."); 2270 return MHD_SC_EXT_EVENT_REG_DAEMON_FDS_FAILURE; 2271 } 2272 else 2273 { 2274 /* Daemon's FDs are repeatedly registered every processing cycle */ 2275 #ifdef MHD_SUPPORT_THREADS 2276 d->events.data.extr.itc_data.app_cntx = NULL; 2277 #endif /* MHD_SUPPORT_THREADS */ 2278 d->events.data.extr.listen_data.app_cntx = NULL; 2279 return MHD_SC_OK; 2280 } 2281 break; 2282 #ifdef MHD_SUPPORT_SELECT 2283 case mhd_POLL_TYPE_SELECT: 2284 mhd_assert (NULL != d->events.data.select.rfds); 2285 mhd_assert (NULL != d->events.data.select.wfds); 2286 mhd_assert (NULL != d->events.data.select.efds); 2287 /* Nothing to do when using 'select()' */ 2288 return MHD_SC_OK; 2289 break; 2290 #endif /* MHD_SUPPORT_SELECT */ 2291 #ifdef MHD_SUPPORT_POLL 2292 case mhd_POLL_TYPE_POLL: 2293 mhd_assert (NULL != d->events.data.poll.fds); 2294 mhd_assert (NULL != d->events.data.poll.rel); 2295 if (1) 2296 { 2297 unsigned int i; 2298 i = 0; 2299 #ifdef MHD_SUPPORT_THREADS 2300 d->events.data.poll.fds[i].fd = mhd_itc_r_fd (d->threading.itc); 2301 d->events.data.poll.fds[i].events = POLLIN; 2302 d->events.data.poll.rel[i].fd_id = mhd_SOCKET_REL_MARKER_ITC; 2303 ++i; 2304 #endif 2305 if (MHD_INVALID_SOCKET != d->net.listen.fd) 2306 { 2307 d->events.data.poll.fds[i].fd = d->net.listen.fd; 2308 d->events.data.poll.fds[i].events = POLLIN; 2309 d->events.data.poll.rel[i].fd_id = mhd_SOCKET_REL_MARKER_LISTEN; 2310 } 2311 } 2312 return MHD_SC_OK; 2313 break; 2314 #endif /* MHD_SUPPORT_POLL */ 2315 #ifdef MHD_SUPPORT_EPOLL 2316 case mhd_POLL_TYPE_EPOLL: 2317 mhd_assert (MHD_INVALID_SOCKET != d->events.data.epoll.e_fd); 2318 mhd_assert (NULL != d->events.data.epoll.events); 2319 mhd_assert (0 < d->events.data.epoll.num_elements); 2320 if (1) 2321 { 2322 struct epoll_event reg_event; 2323 #ifdef MHD_SUPPORT_THREADS 2324 reg_event.events = EPOLLIN | EPOLLET; 2325 reg_event.data.u64 = (uint64_t) mhd_SOCKET_REL_MARKER_ITC; /* uint64_t is used in the epoll header */ 2326 if (0 != epoll_ctl (d->events.data.epoll.e_fd, EPOLL_CTL_ADD, 2327 mhd_itc_r_fd (d->threading.itc), &reg_event)) 2328 { 2329 mhd_LOG_MSG (d, MHD_SC_EPOLL_ADD_DAEMON_FDS_FAILURE, \ 2330 "Failed to add ITC FD to the epoll monitoring."); 2331 return MHD_SC_EPOLL_ADD_DAEMON_FDS_FAILURE; 2332 } 2333 mhd_dbg_print_fd_mon_req ("ITC", \ 2334 mhd_itc_r_fd (d->threading.itc), \ 2335 true, \
2336 false, \ 2337 false); 2338 #endif 2339 if (MHD_INVALID_SOCKET != d->net.listen.fd) 2340 { 2341 reg_event.events = EPOLLIN; 2342 reg_event.data.u64 = (uint64_t) mhd_SOCKET_REL_MARKER_LISTEN; /* uint64_t is used in the epoll header */ 2343 if (0 != epoll_ctl (d->events.data.epoll.e_fd, EPOLL_CTL_ADD, 2344 d->net.listen.fd, &reg_event)) 2345 { 2346 mhd_LOG_MSG (d, MHD_SC_EPOLL_ADD_DAEMON_FDS_FAILURE, \ 2347 "Failed to add listening FD to the epoll monitoring."); 2348 return MHD_SC_EPOLL_ADD_DAEMON_FDS_FAILURE; 2349 } 2350 mhd_dbg_print_fd_mon_req ("lstn", \ 2351 d->net.listen.fd, \ 2352 true, \ 2353 false, \ 2354 false); 2355 } 2356 } 2357 return MHD_SC_OK; 2358 break; 2359 #endif /* MHD_SUPPORT_EPOLL */ 2360 #ifndef MHD_SUPPORT_SELECT 2361 case mhd_POLL_TYPE_SELECT: 2362 #endif /* ! MHD_SUPPORT_SELECT */ 2363 #ifndef MHD_SUPPORT_POLL 2364 case mhd_POLL_TYPE_POLL: 2365 #endif /* ! MHD_SUPPORT_POLL */ 2366 case mhd_POLL_TYPE_NOT_SET_YET: 2367 default: 2368 mhd_UNREACHABLE (); 2369 break; 2370 } 2371 mhd_UNREACHABLE (); 2372 return MHD_SC_INTERNAL_ERROR; 2373 } 2374 2375 2376 /** 2377 * The initial part of events de-initialisation: remove ITC and listening FD 2378 * from the monitored items (if supported by monitoring syscall). 2379 * @param d the daemon object 2380 */ 2381 static MHD_FN_PAR_NONNULL_ (1) void 2382 deinit_daemon_fds_monitoring (struct MHD_Daemon *restrict d) 2383 { 2384 mhd_assert (d->dbg.events_fully_inited); 2385 2386 switch (d->events.poll_type) 2387 { 2388 case mhd_POLL_TYPE_EXT: 2389 if (NULL != d->events.data.extr.listen_data.app_cntx) 2390 (void) mhd_daemon_extr_event_reg ( 2391 d, 2392 d->net.listen.fd, 2393 MHD_FD_STATE_NONE, 2394 d->events.data.extr.listen_data.app_cntx, 2395 (struct MHD_EventUpdateContext *) mhd_SOCKET_REL_MARKER_LISTEN); 2396 #ifdef MHD_SUPPORT_THREADS 2397 if (NULL != d->events.data.extr.itc_data.app_cntx) 2398 (void) mhd_daemon_extr_event_reg (d, 2399 mhd_itc_r_fd (d->threading.itc), 2400 MHD_FD_STATE_NONE, 2401 d->events.data.extr.itc_data.app_cntx, 2402 (struct MHD_EventUpdateContext *) 2403 mhd_SOCKET_REL_MARKER_ITC); 2404 #endif /* MHD_SUPPORT_THREADS */ 2405 return; 2406 #ifdef MHD_SUPPORT_SELECT 2407 case mhd_POLL_TYPE_SELECT: 2408 /* Nothing to do when using 'select()' */ 2409 return; 2410 break; 2411 #endif /* MHD_SUPPORT_SELECT */ 2412 #ifdef MHD_SUPPORT_POLL 2413 case mhd_POLL_TYPE_POLL: 2414 /* Nothing to do when using 'poll()' */ 2415 return; 2416 break; 2417 #endif /* MHD_SUPPORT_POLL */ 2418 #ifdef MHD_SUPPORT_EPOLL 2419 case mhd_POLL_TYPE_EPOLL: 2420 /* Nothing to do when using epoll. 2421 Monitoring stopped by closing epoll FD. */ 2422 return; 2423 break; 2424 #endif /* MHD_SUPPORT_EPOLL */ 2425 #ifndef MHD_SUPPORT_SELECT 2426 case mhd_POLL_TYPE_SELECT: 2427 #endif /* ! MHD_SUPPORT_SELECT */ 2428 #ifndef MHD_SUPPORT_POLL 2429 case mhd_POLL_TYPE_POLL: 2430 #endif /* ! MHD_SUPPORT_POLL */ 2431 case mhd_POLL_TYPE_NOT_SET_YET: 2432 default: 2433 mhd_UNREACHABLE (); 2434 break; 2435 } 2436 mhd_UNREACHABLE (); 2437 } 2438 2439 2440 /** 2441 * Initialise daemon connections' data. 2442 * @param d the daemon object 2443 * @param s the user settings 2444 * @return #MHD_SC_OK on success, 2445 * the error code otherwise 2446 */ 2447 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) 2448 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 2449 init_individual_conns (struct MHD_Daemon *restrict d, 2450 struct DaemonOptions *restrict s) 2451 { 2452 mhd_assert (!
mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 2453 mhd_assert (0 != d->conns.cfg.count_limit); 2454 2455 mhd_DLINKEDL_INIT_LIST (&(d->conns),all_conn); 2456 mhd_DLINKEDL_INIT_LIST (&(d->conns),def_timeout); 2457 mhd_DLINKEDL_INIT_LIST (&(d->conns),cust_timeout); 2458 d->conns.count = 0; 2459 d->conns.block_new = false; 2460 2461 d->conns.cfg.mem_pool_size = s->conn_memory_limit; 2462 if (0 == d->conns.cfg.mem_pool_size) 2463 d->conns.cfg.mem_pool_size = 32 * 1024; 2464 else if (256 > d->conns.cfg.mem_pool_size) 2465 d->conns.cfg.mem_pool_size = 256; 2466 2467 switch (s->conn_buff_zeroing) 2468 { 2469 case MHD_CONN_BUFFER_ZEROING_DISABLED: 2470 d->conns.cfg.mem_pool_zeroing = MHD_MEMPOOL_ZEROING_NEVER; 2471 break; 2472 case MHD_CONN_BUFFER_ZEROING_BASIC: 2473 d->conns.cfg.mem_pool_zeroing = MHD_MEMPOOL_ZEROING_ON_RESET; 2474 break; 2475 case MHD_CONN_BUFFER_ZEROING_HEAVY: 2476 default: 2477 d->conns.cfg.mem_pool_zeroing = MHD_MEMPOOL_ZEROING_ALWAYS; 2478 break; 2479 } 2480 2481 #ifdef MHD_SUPPORT_UPGRADE 2482 mhd_DLINKEDL_INIT_LIST (&(d->conns.upgr),upgr_cleanup); 2483 if (! mhd_mutex_init (&(d->conns.upgr.ucu_lock))) 2484 { 2485 mhd_LOG_MSG (d, MHD_SC_MUTEX_INIT_FAILURE, \ 2486 "Failed to initialise mutex for the upgraded " \ 2487 "connection list."); 2488 return MHD_SC_MUTEX_INIT_FAILURE; 2489 } 2490 #endif /* MHD_SUPPORT_UPGRADE */ 2491 2492 #ifndef NDEBUG 2493 d->dbg.connections_inited = true; 2494 #endif 2495 return MHD_SC_OK; 2496 } 2497 2498 2499 /** 2500 * Deinitialise daemon connections' data. 2501 * @param d the daemon object 2502 */ 2503 static MHD_FN_PAR_NONNULL_ (1) void 2504 deinit_individual_conns (struct MHD_Daemon *restrict d) 2505 { 2506 #ifdef MHD_SUPPORT_UPGRADE 2507 mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->conns.upgr),upgr_cleanup)); 2508 mhd_assert (NULL == mhd_DLINKEDL_GET_LAST (&(d->conns.upgr),upgr_cleanup)); 2509 2510 mhd_mutex_destroy_chk (&(d->conns.upgr.ucu_lock)); 2511 #endif /* MHD_SUPPORT_UPGRADE */ 2512 2513 mhd_assert (0 != d->conns.cfg.mem_pool_size); 2514 mhd_assert (0 == d->conns.count); 2515 mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->conns),cust_timeout)); 2516 mhd_assert (NULL == mhd_DLINKEDL_GET_LAST (&(d->conns),cust_timeout)); 2517 mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->conns),def_timeout)); 2518 mhd_assert (NULL == mhd_DLINKEDL_GET_LAST (&(d->conns),def_timeout)); 2519 mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn)); 2520 mhd_assert (NULL == mhd_DLINKEDL_GET_LAST (&(d->conns),all_conn)); 2521 } 2522 2523 2524 /** 2525 * Prepare daemon-local (worker daemon for thread pool mode) threading data 2526 * and finish events initialising. 2527 * To be used only with non-master daemons. 2528 * Do not start the thread even if configured for the internal threads. 2529 * @param d the daemon object 2530 * @return #MHD_SC_OK on success, 2531 * the error code otherwise 2532 */ 2533 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) 2534 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 2535 init_individual_thread_data_events_conns (struct MHD_Daemon *restrict d, 2536 struct DaemonOptions *restrict s) 2537 { 2538 enum MHD_StatusCode res; 2539 mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type)); 2540 mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type)); 2541 mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 2542 mhd_assert (! 
d->dbg.connections_inited); 2543 2544 res = allocate_events (d); 2545 if (MHD_SC_OK != res) 2546 return res; 2547 2548 res = init_itc (d); 2549 if (MHD_SC_OK == res) 2550 { 2551 res = init_daemon_fds_monitoring (d); 2552 2553 if (MHD_SC_OK == res) 2554 { 2555 #ifndef NDEBUG 2556 d->dbg.events_fully_inited = true; 2557 #endif 2558 #ifdef MHD_SUPPORT_THREADS 2559 mhd_thread_handle_ID_set_invalid (&(d->threading.tid)); 2560 d->threading.stop_requested = false; 2561 #endif /* MHD_SUPPORT_THREADS */ 2562 #ifndef NDEBUG 2563 d->dbg.threading_inited = true; 2564 #endif 2565 2566 res = init_individual_conns (d, s); 2567 if (MHD_SC_OK == res) 2568 return MHD_SC_OK; 2569 2570 /* Below is a clean-up path */ 2571 2572 deinit_daemon_fds_monitoring (d); 2573 } 2574 deinit_itc (d); 2575 } 2576 deallocate_events (d); 2577 mhd_assert (MHD_SC_OK != res); 2578 return res; 2579 } 2580 2581 2582 /** 2583 * Deinit daemon-local (worker daemon for thread pool mode) threading data 2584 * and deallocate events. 2585 * To be used only with non-master daemons. 2586 * Do not start the thread even if configured for the internal threads. 2587 * @param d the daemon object 2588 */ 2589 static MHD_FN_PAR_NONNULL_ (1) void 2590 deinit_individual_thread_data_events_conns (struct MHD_Daemon *restrict d) 2591 { 2592 deinit_individual_conns (d); 2593 deinit_daemon_fds_monitoring (d); 2594 deinit_itc (d); 2595 deallocate_events (d); 2596 mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn)); 2597 mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->events),proc_ready)); 2598 #ifndef NDEBUG 2599 d->dbg.events_fully_inited = false; 2600 #endif 2601 } 2602 2603 2604 /** 2605 * Set the maximum number of handled connections for the daemon. 2606 * Works only for global limit, does not work for the worker daemon. 2607 * @param d the daemon object 2608 * @param s the user settings 2609 * @return #MHD_SC_OK on success, 2610 * the error code otherwise 2611 */ 2612 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) 2613 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 2614 set_connections_total_limits (struct MHD_Daemon *restrict d, 2615 struct DaemonOptions *restrict s) 2616 { 2617 unsigned int limit_by_conf; 2618 unsigned int limit_by_num; 2619 unsigned int limit_by_select; 2620 unsigned int resulting_limit; 2621 bool error_by_fd_setsize; 2622 unsigned int num_worker_daemons; 2623 2624 mhd_assert (! 
mhd_D_HAS_MASTER (d)); 2625 mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type)); 2626 2627 num_worker_daemons = 1; 2628 #ifdef MHD_SUPPORT_THREADS 2629 if (mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL == d->wmode_int) 2630 { 2631 mhd_assert (MHD_WM_WORKER_THREADS == s->work_mode.mode); 2632 if ((0 != s->global_connection_limit) && 2633 (0 != s->work_mode.params.num_worker_threads) && 2634 (s->global_connection_limit < s->work_mode.params.num_worker_threads)) 2635 { 2636 mhd_LOG_MSG ( \ 2637 d, MHD_SC_CONFIGURATION_CONN_LIMIT_TOO_SMALL, \ 2638 "The limit specified by MHD_D_O_GLOBAL_CONNECTION_LIMIT is smaller " \ 2639 "than the number of worker threads."); 2640 return MHD_SC_CONFIGURATION_CONN_LIMIT_TOO_SMALL; 2641 } 2642 } 2643 if (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)) 2644 num_worker_daemons = s->work_mode.params.num_worker_threads; 2645 #endif /* MHD_SUPPORT_THREADS */ 2646 2647 limit_by_conf = s->global_connection_limit; 2648 limit_by_num = UINT_MAX; 2649 limit_by_select = UINT_MAX; 2650 mhd_assert (UINT_MAX == limit_by_num); /* Mute analyser warning */ 2651 2652 error_by_fd_setsize = false; 2653 #ifdef MHD_SOCKETS_KIND_POSIX 2654 if (1) 2655 { 2656 limit_by_num = (unsigned int) d->net.cfg.max_fd_num; 2657 if (0 != limit_by_num) 2658 { 2659 /* Find the upper limit. 2660 The real limit is lower, as any other process FDs will use the slots 2661 in the allowed numbers range */ 2662 limit_by_num -= 3; /* The numbers zero, one and two are typically used */ 2663 #ifdef MHD_SUPPORT_THREADS 2664 limit_by_num -= mhd_ITC_NUM_FDS * num_worker_daemons; 2665 #endif /* MHD_SUPPORT_THREADS */ 2666 if (MHD_INVALID_SOCKET != d->net.listen.fd) 2667 --limit_by_num; /* One FD is used for the listening socket */ 2668 if ((num_worker_daemons > limit_by_num) || 2669 (limit_by_num > (unsigned int) d->net.cfg.max_fd_num) /* Underflow */) 2670 { 2671 if (d->net.cfg.max_fd_num == s->fd_number_limit) 2672 { 2673 mhd_LOG_MSG ( \ 2674 d, MHD_SC_MAX_FD_NUMBER_LIMIT_TOO_STRICT, \ 2675 "The limit specified by MHD_D_O_FD_NUMBER_LIMIT is too strict " \ 2676 "for this daemon's settings."); 2677 return MHD_SC_MAX_FD_NUMBER_LIMIT_TOO_STRICT; 2678 } 2679 else 2680 { 2681 mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type); 2682 error_by_fd_setsize = true; 2683 } 2684 } 2685 } 2686 else 2687 limit_by_num = (unsigned int) INT_MAX; 2688 } 2689 #elif defined(MHD_SOCKETS_KIND_WINSOCK) 2690 if (1) 2691 { 2692 #ifdef MHD_SUPPORT_SELECT 2693 if ((mhd_DAEMON_TYPE_SINGLE == d->threading.d_type) && 2694 (mhd_POLL_TYPE_SELECT == d->events.poll_type)) 2695 { 2696 /* W32 limits the total number (count) of sockets used for select() */ 2697 unsigned int limit_per_worker; 2698 2699 limit_per_worker = FD_SETSIZE; 2700 if (MHD_INVALID_SOCKET != d->net.listen.fd) 2701 --limit_per_worker; /* The slot for the listening socket */ 2702 #ifdef MHD_SUPPORT_THREADS 2703 --limit_per_worker; /* The slot for the ITC */ 2704 #endif /* MHD_SUPPORT_THREADS */ 2705 if ((0 == limit_per_worker) || (limit_per_worker > FD_SETSIZE)) 2706 error_by_fd_setsize = true; 2707 else 2708 { 2709 limit_by_select = limit_per_worker * num_worker_daemons; 2710 if (limit_by_select / limit_per_worker != num_worker_daemons) 2711 limit_by_select = UINT_MAX; 2712 } 2713 } 2714 #endif /* MHD_SUPPORT_SELECT */ 2715 (void) 0; /* Mute compiler warning */ 2716 } 2717 #endif /* MHD_SOCKETS_KIND_POSIX */ 2718 if (error_by_fd_setsize) 2719 { 2720 mhd_LOG_MSG ( \ 2721 d, MHD_SC_SYS_FD_SETSIZE_TOO_STRICT, \ 2722 "The FD_SETSIZE is too strict to run daemon with the polling " \ 2723
"by select() and with the specified number of workers."); 2724 return MHD_SC_SYS_FD_SETSIZE_TOO_STRICT; 2725 } 2726 2727 if (0 != limit_by_conf) 2728 { 2729 /* The number has bet set explicitly */ 2730 resulting_limit = limit_by_conf; 2731 } 2732 else 2733 { 2734 /* No user configuration provided */ 2735 unsigned int suggested_limit; 2736 #ifndef MHD_SOCKETS_KIND_WINSOCK 2737 #define TYPICAL_NOFILES_LIMIT (1024) /* The usual limit for the number of open FDs */ 2738 suggested_limit = TYPICAL_NOFILES_LIMIT; 2739 suggested_limit -= 3; /* The numbers zero, one and two are used typically */ 2740 #ifdef MHD_SUPPORT_THREADS 2741 suggested_limit -= mhd_ITC_NUM_FDS * num_worker_daemons; 2742 #endif /* MHD_SUPPORT_THREADS */ 2743 if (MHD_INVALID_SOCKET != d->net.listen.fd) 2744 --suggested_limit; /* One FD is used for the listening socket */ 2745 if (suggested_limit > TYPICAL_NOFILES_LIMIT) 2746 suggested_limit = 0; /* Overflow */ 2747 #else /* MHD_SOCKETS_KIND_WINSOCK */ 2748 #ifdef _WIN64 2749 suggested_limit = 2048; 2750 #else 2751 suggested_limit = 1024; 2752 #endif 2753 #endif /* MHD_SOCKETS_KIND_WINSOCK */ 2754 if (suggested_limit < num_worker_daemons) 2755 { 2756 /* Use at least one connection for every worker daemon and 2757 let the system to restrict the new connections if they are above 2758 the system limits. */ 2759 suggested_limit = num_worker_daemons; 2760 } 2761 resulting_limit = suggested_limit; 2762 } 2763 if (resulting_limit > limit_by_num) 2764 resulting_limit = limit_by_num; 2765 2766 if (resulting_limit > limit_by_select) 2767 resulting_limit = limit_by_select; 2768 2769 mhd_assert (resulting_limit >= num_worker_daemons); 2770 d->conns.cfg.count_limit = resulting_limit; 2771 if (d->conns.cfg.per_ip_limit <= d->conns.cfg.count_limit) 2772 d->conns.cfg.per_ip_limit = 0; /* Already enforced by global limit */ 2773 2774 return MHD_SC_OK; 2775 } 2776 2777 2778 /** 2779 * Set correct daemon threading type. 2780 * Set the number of workers for thread pool type. 2781 * @param d the daemon object 2782 * @return #MHD_SC_OK on success, 2783 * the error code otherwise 2784 */ 2785 MHD_FN_PAR_NONNULL_ (1) \ 2786 MHD_FN_MUST_CHECK_RESULT_ static inline enum MHD_StatusCode 2787 set_d_threading_type (struct MHD_Daemon *restrict d) 2788 { 2789 switch (d->wmode_int) 2790 { 2791 case mhd_WM_INT_EXTERNAL_EVENTS_EDGE: 2792 case mhd_WM_INT_EXTERNAL_EVENTS_LEVEL: 2793 mhd_assert (! mhd_WM_INT_HAS_THREADS (d->wmode_int)); 2794 mhd_assert (mhd_POLL_TYPE_EXT == d->events.poll_type); 2795 mhd_assert (NULL != d->events.data.extr.cb_data.cb); 2796 #ifdef MHD_SUPPORT_THREADS 2797 d->threading.d_type = mhd_DAEMON_TYPE_SINGLE; 2798 #endif /* MHD_SUPPORT_THREADS */ 2799 return MHD_SC_OK; 2800 case mhd_WM_INT_INTERNAL_EVENTS_NO_THREADS: 2801 mhd_assert (! mhd_WM_INT_HAS_THREADS (d->wmode_int)); 2802 mhd_assert (mhd_POLL_TYPE_EXT != d->events.poll_type); 2803 #ifdef MHD_SUPPORT_THREADS 2804 d->threading.d_type = mhd_DAEMON_TYPE_SINGLE; 2805 #endif /* MHD_SUPPORT_THREADS */ 2806 return MHD_SC_OK; 2807 #ifdef MHD_SUPPORT_THREADS 2808 case mhd_WM_INT_INTERNAL_EVENTS_ONE_THREAD: 2809 mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int)); 2810 mhd_assert (mhd_POLL_TYPE_EXT != d->events.poll_type); 2811 d->threading.d_type = mhd_DAEMON_TYPE_SINGLE; 2812 return MHD_SC_OK; 2813 case mhd_WM_INT_INTERNAL_EVENTS_THREAD_PER_CONNECTION: 2814 mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int)); 2815 mhd_assert (mhd_POLL_TYPE_EXT != d->events.poll_type); 2816 mhd_assert (! 
mhd_POLL_TYPE_INT_IS_EPOLL (d->events.poll_type)); 2817 d->threading.d_type = mhd_DAEMON_TYPE_LISTEN_ONLY; 2818 return MHD_SC_OK; 2819 case mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL: 2820 mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int)); 2821 mhd_assert (mhd_POLL_TYPE_EXT != d->events.poll_type); 2822 d->threading.d_type = mhd_DAEMON_TYPE_MASTER_CONTROL_ONLY; 2823 return MHD_SC_OK; 2824 #endif /* MHD_SUPPORT_THREADS */ 2825 default: 2826 break; 2827 } 2828 mhd_UNREACHABLE (); 2829 return MHD_SC_INTERNAL_ERROR; 2830 } 2831 2832 2833 #ifdef MHD_SUPPORT_THREADS 2834 2835 /** 2836 * De-initialise workers pool, including worker daemons. 2837 * The threads must not be running. 2838 * @param d the daemon object 2839 * @param num_workers the number of workers to deinit 2840 */ 2841 static MHD_FN_PAR_NONNULL_ (1) void 2842 deinit_workers_pool (struct MHD_Daemon *restrict d, 2843 unsigned int num_workers) 2844 { 2845 unsigned int i; 2846 mhd_assert (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 2847 mhd_assert (NULL != d->threading.hier.pool.workers); 2848 mhd_assert ((2 <= d->threading.hier.pool.num) || \ 2849 (mhd_DAEMON_STATE_STARTING == d->state)); 2850 mhd_assert ((num_workers == d->threading.hier.pool.num) || \ 2851 (mhd_DAEMON_STATE_STARTING == d->state)); 2852 mhd_assert ((mhd_DAEMON_STATE_STOPPING == d->state) || \ 2853 (mhd_DAEMON_STATE_STARTING == d->state)); 2854 2855 /* Deinitialise in reverse order */ 2856 for (i = num_workers - 1; num_workers > i; --i) 2857 { /* Note: loop exits after underflow of 'i' */ 2858 struct MHD_Daemon *const worker = d->threading.hier.pool.workers + i; 2859 deinit_individual_thread_data_events_conns (worker); 2860 #ifdef MHD_SUPPORT_EPOLL 2861 if (mhd_POLL_TYPE_EPOLL == worker->events.poll_type) 2862 deinit_epoll (worker); 2863 #endif /* MHD_SUPPORT_EPOLL */ 2864 } 2865 free (d->threading.hier.pool.workers); 2866 #ifndef NDEBUG 2867 d->dbg.thread_pool_inited = false; 2868 #endif 2869 } 2870 2871 2872 /** 2873 * Nullify worker daemon members that should be set only in the master daemon 2874 * @param d the daemon object 2875 */ 2876 static MHD_FN_PAR_NONNULL_ (1) void 2877 reset_master_only_areas (struct MHD_Daemon *restrict d) 2878 { 2879 #ifdef MHD_SUPPORT_AUTH_DIGEST 2880 memset (&(d->auth_dg.nonces_lock), 2881 0x7F, 2882 sizeof(d->auth_dg.nonces_lock)); 2883 #endif 2884 /* Not needed. It is initialised later */ 2885 /* memset (&(d->req_cfg.large_buf), 0, sizeof(d->req_cfg.large_buf)); */ 2886 (void) d; 2887 } 2888 2889 2890 /** 2891 * Initialise workers pool, including worker daemons. 2892 * Do not start the threads. 2893 * @param d the daemon object 2894 * @param s the user settings 2895 * @return #MHD_SC_OK on success, 2896 * the error code otherwise 2897 */ 2898 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) 2899 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 2900 init_workers_pool (struct MHD_Daemon *restrict d, 2901 struct DaemonOptions *restrict s) 2902 { 2903 enum MHD_StatusCode res; 2904 size_t workers_pool_size; 2905 unsigned int conn_per_daemon; 2906 unsigned int num_workers; 2907 unsigned int conn_remainder; 2908 unsigned int i; 2909 2910 mhd_assert (d->dbg.net_inited); 2911 mhd_assert (!
d->dbg.net_deinited); 2912 mhd_assert (mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL == d->wmode_int); 2913 mhd_assert (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 2914 mhd_assert (mhd_POLL_TYPE_NOT_SET_YET < d->events.poll_type); 2915 mhd_assert (1 < s->work_mode.params.num_worker_threads); 2916 mhd_assert (0 != d->conns.cfg.count_limit); 2917 mhd_assert (s->work_mode.params.num_worker_threads <= \ 2918 d->conns.cfg.count_limit); 2919 mhd_assert (! d->dbg.thread_pool_inited); 2920 2921 num_workers = s->work_mode.params.num_worker_threads; 2922 workers_pool_size = 2923 (sizeof(struct MHD_Daemon) * num_workers); 2924 if (workers_pool_size / num_workers != sizeof(struct MHD_Daemon)) 2925 { /* Overflow */ 2926 mhd_LOG_MSG ( \ 2927 d, MHD_SC_THREAD_POOL_MEM_ALLOC_FAILURE, \ 2928 "The size of the thread pool is too large."); 2929 return MHD_SC_THREAD_POOL_MEM_ALLOC_FAILURE; 2930 } 2931 2932 #ifndef NDEBUG 2933 mhd_itc_set_invalid (&(d->threading.itc)); 2934 mhd_thread_handle_ID_set_invalid (&(d->threading.tid)); 2935 #endif 2936 2937 d->threading.hier.pool.workers = (struct MHD_Daemon *) 2938 malloc (workers_pool_size); 2939 if (NULL == d->threading.hier.pool.workers) 2940 { 2941 mhd_LOG_MSG ( \ 2942 d, MHD_SC_THREAD_POOL_MEM_ALLOC_FAILURE, \ 2943 "Failed to allocate memory for the thread pool."); 2944 return MHD_SC_THREAD_POOL_MEM_ALLOC_FAILURE; 2945 } 2946 2947 conn_per_daemon = d->conns.cfg.count_limit / num_workers; 2948 conn_remainder = d->conns.cfg.count_limit % num_workers; 2949 res = MHD_SC_OK; 2950 for (i = 0; num_workers > i; ++i) 2951 { 2952 struct MHD_Daemon *restrict const worker = 2953 d->threading.hier.pool.workers + i; 2954 memcpy (worker, d, sizeof(struct MHD_Daemon)); 2955 reset_master_only_areas (worker); 2956 2957 worker->threading.d_type = mhd_DAEMON_TYPE_WORKER; 2958 worker->threading.hier.master = d; 2959 worker->conns.cfg.count_limit = conn_per_daemon; 2960 if (conn_remainder > i) 2961 worker->conns.cfg.count_limit++; /* Distribute the remainder */ 2962 #ifdef MHD_SUPPORT_EPOLL 2963 if (mhd_POLL_TYPE_EPOLL == worker->events.poll_type) 2964 { 2965 if (0 == i) 2966 { 2967 mhd_assert (0 <= d->events.data.epoll.e_fd); 2968 /* Move epoll control FD from the master daemon to the first worker */ 2969 /* The FD has been copied by memcpy(). Clean-up the master daemon. */ 2970 d->events.data.epoll.e_fd = MHD_INVALID_SOCKET; 2971 } 2972 else 2973 res = init_epoll (worker, 2974 true); 2975 } 2976 #endif /* MHD_SUPPORT_EPOLL */ 2977 if (MHD_SC_OK == res) 2978 { 2979 res = init_individual_thread_data_events_conns (worker, s); 2980 if (MHD_SC_OK == res) 2981 continue; /* Process the next worker */ 2982 2983 /* Below is a clean-up of the current slot */ 2984 2985 #ifdef MHD_SUPPORT_EPOLL 2986 if (mhd_POLL_TYPE_EPOLL == worker->events.poll_type) 2987 deinit_epoll (worker); 2988 #endif /* MHD_SUPPORT_EPOLL */ 2989 } 2990 break; 2991 } 2992 if (num_workers == i) 2993 { 2994 mhd_assert (MHD_SC_OK == res); 2995 #ifndef NDEBUG 2996 d->dbg.thread_pool_inited = true; 2997 d->dbg.threading_inited = true; 2998 #endif 2999 d->threading.hier.pool.num = num_workers; 3000 return MHD_SC_OK; 3001 } 3002 3003 /* Below is a clean-up */ 3004 3005 mhd_assert (MHD_SC_OK != res); 3006 deinit_workers_pool (d, i); 3007 return res; 3008 } 3009 3010 3011 #endif /* MHD_SUPPORT_THREADS */ 3012 3013 /** 3014 * Initialise threading and inter-thread communications. 3015 * Also finish initialisation of events processing and initialise daemon's 3016 * connection data.
3017 * Do not start the thread even if configured for the internal threads. 3018 * @param d the daemon object 3019 * @param s the user settings 3020 * @return #MHD_SC_OK on success, 3021 * the error code otherwise 3022 */ 3023 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) 3024 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 3025 daemon_init_threading_and_conn (struct MHD_Daemon *restrict d, 3026 struct DaemonOptions *restrict s) 3027 { 3028 enum MHD_StatusCode res; 3029 3030 mhd_assert (d->dbg.net_inited); 3031 mhd_assert (! d->dbg.net_deinited); 3032 mhd_assert (mhd_POLL_TYPE_NOT_SET_YET != d->events.poll_type); 3033 3034 res = set_d_threading_type (d); 3035 if (MHD_SC_OK != res) 3036 return res; 3037 3038 res = set_connections_total_limits (d, s); 3039 if (MHD_SC_OK != res) 3040 return res; 3041 3042 #ifdef MHD_SUPPORT_THREADS 3043 d->threading.cfg.stack_size = s->stack_size; 3044 #endif /* MHD_SUPPORT_THREADS */ 3045 3046 if (! mhd_D_HAS_WORKERS (d)) 3047 res = init_individual_thread_data_events_conns (d, s); 3048 #ifdef MHD_SUPPORT_THREADS 3049 else 3050 { 3051 res = init_workers_pool (d, s); 3052 if (MHD_SC_OK == res) 3053 { 3054 /* Copy some settings to the master daemon */ 3055 d->conns.cfg.mem_pool_size = 3056 d->threading.hier.pool.workers[0].conns.cfg.mem_pool_size; 3057 } 3058 } 3059 #endif /* MHD_SUPPORT_THREADS */ 3060 3061 if (MHD_SC_OK == res) 3062 { 3063 mhd_assert (d->dbg.events_allocated || \ 3064 mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 3065 mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type) || \ 3066 ! d->dbg.events_allocated); 3067 mhd_assert (! d->dbg.thread_pool_inited || \ 3068 mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 3069 mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type) || \ 3070 d->dbg.thread_pool_inited); 3071 mhd_assert (! mhd_D_TYPE_IS_INTERNAL_ONLY (d->threading.d_type)); 3072 mhd_assert (! d->dbg.events_allocated || d->dbg.connections_inited); 3073 mhd_assert (! d->dbg.connections_inited || d->dbg.events_allocated); 3074 } 3075 return res; 3076 } 3077 3078 3079 /** 3080 * De-initialise threading and inter-thread communications. 3081 * Also deallocate events and de-initialise daemon's connection data. 3082 * No daemon-managed threads should be running. 3083 * @param d the daemon object 3084 */ 3085 static MHD_FN_PAR_NONNULL_ (1) void 3086 daemon_deinit_threading_and_conn (struct MHD_Daemon *restrict d) 3087 { 3088 mhd_assert (d->dbg.net_inited); 3089 mhd_assert (! d->dbg.net_deinited); 3090 mhd_assert (d->dbg.threading_inited); 3091 mhd_assert (! mhd_D_TYPE_IS_INTERNAL_ONLY (d->threading.d_type)); 3092 if (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)) 3093 { 3094 mhd_assert (! mhd_WM_INT_IS_THREAD_POOL (d->wmode_int)); 3095 mhd_assert (d->dbg.connections_inited); 3096 mhd_assert (d->dbg.events_allocated); 3097 mhd_assert (! d->dbg.thread_pool_inited); 3098 deinit_individual_thread_data_events_conns (d); 3099 } 3100 else 3101 { 3102 #ifdef MHD_SUPPORT_THREADS 3103 mhd_assert (mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL == d->wmode_int); 3104 mhd_assert (! d->dbg.connections_inited); 3105 mhd_assert (! d->dbg.events_allocated); 3106 mhd_assert (d->dbg.thread_pool_inited); 3107 deinit_workers_pool (d, d->threading.hier.pool.num); 3108 #else /* ! MHD_SUPPORT_THREADS */ 3109 mhd_assert (0 && "Impossible value"); 3110 mhd_UNREACHABLE (); 3111 (void) 0; 3112 #endif /* ! MHD_SUPPORT_THREADS */ 3113 } 3114 } 3115 3116 3117 #ifdef MHD_SUPPORT_THREADS 3118 3119 /** 3120 * Start the daemon individual single thread.
3121 * Works both for single thread daemons and for worker daemon for thread 3122 * pool mode. 3123 * Must be called only for daemons with internal threads. 3124 * @param d the daemon object, must be completely initialised 3125 * @return #MHD_SC_OK on success, 3126 * the error code otherwise 3127 */ 3128 static MHD_FN_PAR_NONNULL_ (1) 3129 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 3130 start_individual_daemon_thread (struct MHD_Daemon *restrict d) 3131 { 3132 mhd_assert (d->dbg.threading_inited); 3133 mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int)); 3134 mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type)); 3135 mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 3136 mhd_assert (! mhd_thread_handle_ID_is_valid_handle (d->threading.tid)); 3137 3138 if (mhd_DAEMON_TYPE_SINGLE == d->threading.d_type) 3139 { 3140 if (! mhd_create_named_thread ( \ 3141 &(d->threading.tid), "MHD-single", \ 3142 d->threading.cfg.stack_size, \ 3143 &mhd_worker_all_events, \ 3144 (void*) d)) 3145 { 3146 mhd_LOG_MSG (d, MHD_SC_THREAD_MAIN_LAUNCH_FAILURE, \ 3147 "Failed to start daemon main thread."); 3148 return MHD_SC_THREAD_MAIN_LAUNCH_FAILURE; 3149 } 3150 } 3151 else if (mhd_DAEMON_TYPE_WORKER == d->threading.d_type) 3152 { 3153 if (! mhd_create_named_thread ( \ 3154 &(d->threading.tid), "MHD-worker", \ 3155 d->threading.cfg.stack_size, \ 3156 &mhd_worker_all_events, \ 3157 (void*) d)) 3158 { 3159 mhd_LOG_MSG (d, MHD_SC_THREAD_WORKER_LAUNCH_FAILURE, \ 3160 "Failed to start daemon worker thread."); 3161 return MHD_SC_THREAD_WORKER_LAUNCH_FAILURE; 3162 } 3163 } 3164 else if (mhd_DAEMON_TYPE_LISTEN_ONLY == d->threading.d_type) 3165 { 3166 if (! mhd_create_named_thread ( \ 3167 &(d->threading.tid), "MHD-listen", \ 3168 d->threading.cfg.stack_size, \ 3169 &mhd_worker_listening_only, \ 3170 (void*) d)) 3171 { 3172 mhd_LOG_MSG (d, MHD_SC_THREAD_LISTENING_LAUNCH_FAILURE, \ 3173 "Failed to start daemon listening thread."); 3174 return MHD_SC_THREAD_LISTENING_LAUNCH_FAILURE; 3175 } 3176 } 3177 else 3178 { 3179 mhd_assert (0 && "Impossible value"); 3180 mhd_UNREACHABLE (); 3181 return MHD_SC_INTERNAL_ERROR; 3182 } 3183 mhd_assert (mhd_thread_handle_ID_is_valid_handle (d->threading.tid)); 3184 return MHD_SC_OK; 3185 } 3186 3187 3188 /** 3189 * Stop the daemon individual single thread. 3190 * Works both for single thread daemons and for worker daemon for thread 3191 * pool mode. 3192 * Must be called only for daemons with internal threads. 3193 * @param d the daemon object, must be completely initialised 3194 */ 3195 MHD_FN_PAR_NONNULL_ (1) static void 3196 stop_individual_daemon_thread (struct MHD_Daemon *restrict d) 3197 { 3198 mhd_assert (d->dbg.threading_inited); 3199 mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int)); 3200 mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type)); 3201 mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 3202 mhd_assert ((mhd_DAEMON_STATE_STOPPING == d->state) || \ 3203 (mhd_DAEMON_STATE_STARTING == d->state)); 3204 mhd_assert (mhd_thread_handle_ID_is_valid_handle (d->threading.tid)); 3205 3206 d->threading.stop_requested = true; 3207 3208 mhd_daemon_trigger_itc (d); 3209 if (! mhd_thread_handle_ID_join_thread (d->threading.tid)) 3210 { 3211 mhd_LOG_MSG (d, MHD_SC_DAEMON_THREAD_STOP_ERROR, \ 3212 "Failed to stop daemon main thread."); 3213 } 3214 } 3215 3216 3217 /** 3218 * Stop all worker threads in the thread pool. 3219 * Must be called only for master daemons with thread pool. 
3220 * @param d the daemon object, the workers threads must be running 3221 * @param num_workers the number of threads to stop 3222 */ 3223 static MHD_FN_PAR_NONNULL_ (1) void 3224 stop_worker_pool_threads (struct MHD_Daemon *restrict d, 3225 unsigned int num_workers) 3226 { 3227 unsigned int i; 3228 mhd_assert (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 3229 mhd_assert (NULL != d->threading.hier.pool.workers); 3230 mhd_assert (0 != d->threading.hier.pool.num); 3231 mhd_assert (d->dbg.thread_pool_inited); 3232 mhd_assert (2 <= d->threading.hier.pool.num); 3233 mhd_assert ((num_workers == d->threading.hier.pool.num) || \ 3234 (mhd_DAEMON_STATE_STARTING == d->state)); 3235 mhd_assert ((mhd_DAEMON_STATE_STOPPING == d->state) || \ 3236 (mhd_DAEMON_STATE_STARTING == d->state)); 3237 3238 /* Process all the threads in the reverse order */ 3239 3240 /* Trigger all threads */ 3241 for (i = num_workers - 1; num_workers > i; --i) 3242 { /* Note: loop exits after underflow of 'i' */ 3243 d->threading.hier.pool.workers[i].threading.stop_requested = true; 3244 mhd_assert (mhd_ITC_IS_VALID ( \ 3245 d->threading.hier.pool.workers[i].threading.itc)); 3246 mhd_daemon_trigger_itc (d->threading.hier.pool.workers + i); 3247 } 3248 3249 /* Collect all threads */ 3250 for (i = num_workers - 1; num_workers > i; --i) 3251 { /* Note: loop exits after underflow of 'i' */ 3252 struct MHD_Daemon *const restrict worker = 3253 d->threading.hier.pool.workers + i; 3254 mhd_assert (mhd_thread_handle_ID_is_valid_handle (worker->threading.tid)); 3255 if (! mhd_thread_handle_ID_join_thread (worker->threading.tid)) 3256 { 3257 mhd_LOG_MSG (d, MHD_SC_DAEMON_THREAD_STOP_ERROR, \ 3258 "Failed to stop a worker thread."); 3259 } 3260 } 3261 } 3262 3263 3264 /** 3265 * Start the workers pool threads. 3266 * Must be called only for master daemons with thread pool. 3267 * @param d the daemon object, must be completely initialised 3268 * @return #MHD_SC_OK on success, 3269 * the error code otherwise 3270 */ 3271 static MHD_FN_PAR_NONNULL_ (1) 3272 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 3273 start_worker_pool_threads (struct MHD_Daemon *restrict d) 3274 { 3275 enum MHD_StatusCode res; 3276 unsigned int i; 3277 3278 mhd_assert (d->dbg.threading_inited); 3279 mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int)); 3280 mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type)); 3281 mhd_assert (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 3282 mhd_assert (d->dbg.thread_pool_inited); 3283 mhd_assert (2 <= d->threading.hier.pool.num); 3284 3285 res = MHD_SC_OK; 3286 3287 for (i = 0; d->threading.hier.pool.num > i; ++i) 3288 { 3289 res = start_individual_daemon_thread (d->threading.hier.pool.workers + i); 3290 if (MHD_SC_OK != res) 3291 break; 3292 } 3293 if (d->threading.hier.pool.num == i) 3294 { 3295 mhd_assert (MHD_SC_OK == res); 3296 return MHD_SC_OK; 3297 } 3298 3299 stop_worker_pool_threads (d, i); 3300 mhd_assert (MHD_SC_OK != res); 3301 return res; 3302 } 3303 3304 3305 #endif /* MHD_SUPPORT_THREADS */ 3306 3307 /** 3308 * Start the daemon internal threads, if the daemon configured to use them. 3309 * @param d the daemon object, must be completely initialised 3310 * @return #MHD_SC_OK on success, 3311 * the error code otherwise 3312 */ 3313 static MHD_FN_PAR_NONNULL_ (1) 3314 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 3315 daemon_start_threads (struct MHD_Daemon *restrict d) 3316 { 3317 mhd_assert (d->dbg.net_inited); 3318 mhd_assert (! d->dbg.net_deinited); 3319 mhd_assert (d->dbg.threading_inited); 3320 mhd_assert (! 
mhd_D_TYPE_IS_INTERNAL_ONLY (d->threading.d_type)); 3321 if (mhd_WM_INT_HAS_THREADS (d->wmode_int)) 3322 { 3323 #ifdef MHD_SUPPORT_THREADS 3324 if (mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL != d->wmode_int) 3325 { 3326 mhd_assert (d->dbg.threading_inited); 3327 mhd_assert (mhd_DAEMON_TYPE_MASTER_CONTROL_ONLY != d->threading.d_type); 3328 return start_individual_daemon_thread (d); 3329 } 3330 else 3331 { 3332 mhd_assert (d->dbg.thread_pool_inited); 3333 mhd_assert (mhd_DAEMON_TYPE_MASTER_CONTROL_ONLY == d->threading.d_type); 3334 return start_worker_pool_threads (d); 3335 } 3336 #else /* ! MHD_SUPPORT_THREADS */ 3337 mhd_assert (0 && "Impossible value"); 3338 mhd_UNREACHABLE (); 3339 return MHD_SC_INTERNAL_ERROR; 3340 #endif /* ! MHD_SUPPORT_THREADS */ 3341 } 3342 return MHD_SC_OK; 3343 } 3344 3345 3346 /** 3347 * Stop the daemon internal threads, if the daemon configured to use them. 3348 * @param d the daemon object to stop threads 3349 */ 3350 static MHD_FN_PAR_NONNULL_ (1) void 3351 daemon_stop_threads (struct MHD_Daemon *restrict d) 3352 { 3353 mhd_assert (d->dbg.net_inited); 3354 mhd_assert (! d->dbg.net_deinited); 3355 mhd_assert (d->dbg.threading_inited); 3356 if (mhd_WM_INT_HAS_THREADS (d->wmode_int)) 3357 { 3358 #ifdef MHD_SUPPORT_THREADS 3359 if (mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL != d->wmode_int) 3360 { 3361 mhd_assert (d->dbg.threading_inited); 3362 mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 3363 stop_individual_daemon_thread (d); 3364 return; 3365 } 3366 else 3367 { 3368 mhd_assert (d->dbg.thread_pool_inited); 3369 mhd_assert (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 3370 stop_worker_pool_threads (d, d->threading.hier.pool.num); 3371 return; 3372 } 3373 #else /* ! MHD_SUPPORT_THREADS */ 3374 mhd_UNREACHABLE (); 3375 return; 3376 #endif /* ! MHD_SUPPORT_THREADS */ 3377 } 3378 } 3379 3380 3381 /** 3382 * Close all daemon connections for modes without internal threads 3383 * @param d the daemon object 3384 */ 3385 static MHD_FN_PAR_NONNULL_ (1) void 3386 daemon_close_connections (struct MHD_Daemon *restrict d) 3387 { 3388 if (mhd_WM_INT_HAS_THREADS (d->wmode_int)) 3389 { 3390 /* In these modes connections must be closed in the daemon thread */ 3391 mhd_assert (NULL == mhd_DLINKEDL_GET_LAST (&(d->conns),all_conn)); 3392 return; 3393 } 3394 3395 mhd_daemon_close_all_conns (d); 3396 } 3397 3398 3399 /** 3400 * Internal daemon initialisation function. 3401 * This function calls all required initialisation stages one-by-one. 3402 * @param d the daemon object 3403 * @param s the user settings 3404 * @return #MHD_SC_OK on success, 3405 * the error code otherwise 3406 */ 3407 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) 3408 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 3409 daemon_start_internal (struct MHD_Daemon *restrict d, 3410 struct DaemonOptions *restrict s) 3411 { 3412 enum MHD_StatusCode res; 3413 3414 res = daemon_set_basic_settings (d, s); 3415 if (MHD_SC_OK != res) 3416 return res; 3417 3418 res = daemon_set_work_mode (d, s); 3419 if (MHD_SC_OK != res) 3420 return res; 3421 3422 res = daemon_init_net (d, s); 3423 if (MHD_SC_OK != res) 3424 return res; 3425 3426 mhd_assert (d->dbg.net_inited); 3427 3428 res = daemon_init_auth_digest (d, s); 3429 3430 if (MHD_SC_OK == res) 3431 { 3432 res = daemon_init_tls (d, s); 3433 if (MHD_SC_OK == res) 3434 { 3435 mhd_assert (d->dbg.tls_inited); 3436 res = daemon_init_threading_and_conn (d, s); 3437 if (MHD_SC_OK == res) 3438 { 3439 mhd_assert (d->dbg.threading_inited); 3440 mhd_assert (! 
mhd_D_TYPE_IS_INTERNAL_ONLY (d->threading.d_type)); 3441 3442 res = daemon_init_large_buf (d, s); 3443 if (MHD_SC_OK == res) 3444 { 3445 res = daemon_start_threads (d); 3446 if (MHD_SC_OK == res) 3447 { 3448 return MHD_SC_OK; 3449 } 3450 3451 /* Below is a clean-up path */ 3452 daemon_deinit_large_buf (d); 3453 } 3454 daemon_deinit_threading_and_conn (d); 3455 } 3456 daemon_deinit_tls (d); 3457 } 3458 daemon_deinit_auth_digest (d); 3459 } 3460 daemon_deinit_net (d); 3461 mhd_assert (MHD_SC_OK != res); 3462 return res; 3463 } 3464 3465 3466 MHD_EXTERN_ 3467 MHD_FN_PAR_NONNULL_ (1) MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 3468 MHD_daemon_start (struct MHD_Daemon *daemon) 3469 { 3470 struct MHD_Daemon *const d = daemon; /* a short alias */ 3471 struct DaemonOptions *const s = daemon->settings; /* a short alias */ 3472 enum MHD_StatusCode res; 3473 3474 if (mhd_DAEMON_STATE_NOT_STARTED != daemon->state) 3475 return MHD_SC_TOO_LATE; 3476 3477 mhd_assert (NULL != s); 3478 3479 d->state = mhd_DAEMON_STATE_STARTING; 3480 res = daemon_start_internal (d, s); 3481 3482 d->settings = NULL; 3483 dsettings_release (s); 3484 3485 d->state = 3486 (MHD_SC_OK == res) ? mhd_DAEMON_STATE_STARTED : mhd_DAEMON_STATE_FAILED; 3487 3488 return res; 3489 } 3490 3491 3492 MHD_EXTERN_ MHD_FN_PAR_NONNULL_ALL_ void 3493 MHD_daemon_destroy (struct MHD_Daemon *daemon) 3494 { 3495 bool not_yet_started = (mhd_DAEMON_STATE_NOT_STARTED == daemon->state); 3496 bool has_failed = (mhd_DAEMON_STATE_FAILED == daemon->state); 3497 mhd_assert (mhd_DAEMON_STATE_STOPPING > daemon->state); 3498 mhd_assert (mhd_DAEMON_STATE_STARTING != daemon->state); 3499 3500 daemon->state = mhd_DAEMON_STATE_STOPPING; 3501 if (not_yet_started) 3502 { 3503 mhd_assert (NULL != daemon->settings); 3504 dsettings_release (daemon->settings); 3505 } 3506 else if (! has_failed) 3507 { 3508 mhd_assert (NULL == daemon->settings); 3509 mhd_assert (daemon->dbg.threading_inited); 3510 3511 daemon_stop_threads (daemon); 3512 3513 daemon_close_connections (daemon); 3514 3515 daemon_deinit_threading_and_conn (daemon); 3516 3517 daemon_deinit_large_buf (daemon); 3518 3519 daemon_deinit_tls (daemon); 3520 3521 daemon_deinit_auth_digest (daemon); 3522 3523 daemon_deinit_net (daemon); 3524 } 3525 daemon->state = mhd_DAEMON_STATE_STOPPED; /* Useful only for debugging */ 3526 3527 free (daemon); 3528 3529 mhd_lib_deinit_global_if_needed (); 3530 }
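/*
 * Illustrative usage sketch (hypothetical application code, not part of the
 * library build): it shows the lifecycle served by the two public functions
 * defined above. Only MHD_daemon_start() and MHD_daemon_destroy() are
 * implemented in this file; the daemon creation call and the request
 * callback referenced below are assumptions about the MHD2 public API and
 * may differ from the actual public header declarations.
 *
 *   struct MHD_Daemon *d;
 *
 *   d = MHD_daemon_create (&app_request_cb, NULL);   // assumed creation API
 *   if (NULL == d)
 *     return 1;
 *   // ... apply daemon options here (listen port, work mode, TLS, ...) ...
 *   if (MHD_SC_OK != MHD_daemon_start (d)) // consumes and releases the
 *   {                                      // settings; on success internal
 *     MHD_daemon_destroy (d);              // threads (if configured) run;
 *     return 1;                            // destroy is safe after failure
 *   }
 *   // ... application runs; requests are served by the daemon ...
 *   MHD_daemon_destroy (d);                // stops threads, closes
 *                                          // connections, frees the daemon
 */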