daemon_start.c (122421B)
1 /* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */ 2 /* 3 This file is part of GNU libmicrohttpd. 4 Copyright (C) 2024 Evgeny Grin (Karlson2k) 5 6 GNU libmicrohttpd is free software; you can redistribute it and/or 7 modify it under the terms of the GNU Lesser General Public 8 License as published by the Free Software Foundation; either 9 version 2.1 of the License, or (at your option) any later version. 10 11 GNU libmicrohttpd is distributed in the hope that it will be useful, 12 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 Lesser General Public License for more details. 15 16 Alternatively, you can redistribute GNU libmicrohttpd and/or 17 modify it under the terms of the GNU General Public License as 18 published by the Free Software Foundation; either version 2 of 19 the License, or (at your option) any later version, together 20 with the eCos exception, as follows: 21 22 As a special exception, if other files instantiate templates or 23 use macros or inline functions from this file, or you compile this 24 file and link it with other works to produce a work based on this 25 file, this file does not by itself cause the resulting work to be 26 covered by the GNU General Public License. However the source code 27 for this file must still be made available in accordance with 28 section (3) of the GNU General Public License v2. 29 30 This exception does not invalidate any other reasons why a work 31 based on this file might be covered by the GNU General Public 32 License. 33 34 You should have received copies of the GNU Lesser General Public 35 License and the GNU General Public License along with this library; 36 if not, see <https://www.gnu.org/licenses/>. 
37 */ 38 39 /** 40 * @file src/mhd2/daemon_start.c 41 * @brief The implementation of the MHD_daemon_start() 42 * @author Karlson2k (Evgeny Grin) 43 */ 44 45 #include "mhd_sys_options.h" 46 47 #include "mhd_assert.h" 48 #include "mhd_unreachable.h" 49 #include "mhd_assume.h" 50 51 #include "mhd_constexpr.h" 52 53 #include "sys_bool_type.h" 54 #include "sys_base_types.h" 55 #include "sys_malloc.h" 56 #include "compat_calloc.h" 57 58 #include <string.h> 59 #include "sys_sockets_types.h" 60 #include "sys_sockets_headers.h" 61 #include "mhd_sockets_macros.h" 62 #include "sys_ip_headers.h" 63 64 #include "mhd_atomic_counter.h" 65 66 #ifdef MHD_SOCKETS_KIND_POSIX 67 # include "sys_errno.h" 68 #endif 69 #ifdef MHD_SUPPORT_EPOLL 70 # include <sys/epoll.h> 71 #endif 72 73 #ifdef MHD_SOCKETS_KIND_POSIX 74 # include <fcntl.h> 75 # ifdef MHD_SUPPORT_SELECT 76 # ifdef HAVE_SYS_SELECT_H 77 # include <sys/select.h> /* For FD_SETSIZE */ 78 # else 79 # ifdef HAVE_SYS_TIME_H 80 # include <sys/time.h> 81 # endif 82 # ifdef HAVE_SYS_TYPES_H 83 # include <sys/types.h> 84 # endif 85 # ifdef HAVE_UNISTD_H 86 # include <unistd.h> 87 # endif 88 # endif 89 # endif 90 #endif 91 92 #include "extr_events_funcs.h" 93 94 #include "mhd_dbg_print.h" 95 96 #include "mhd_limits.h" 97 98 #include "mhd_daemon.h" 99 #include "daemon_options.h" 100 101 #include "mhd_sockets_funcs.h" 102 103 #include "mhd_lib_init.h" 104 #include "daemon_logger.h" 105 106 #ifdef MHD_SUPPORT_HTTPS 107 # include "mhd_tls_common.h" 108 # include "mhd_tls_funcs.h" 109 #endif 110 111 #include "events_process.h" 112 113 #ifdef MHD_SUPPORT_THREADS 114 # include "mhd_itc.h" 115 # include "mhd_threads.h" 116 # include "daemon_funcs.h" 117 #endif 118 119 #include "mhd_public_api.h" 120 121 122 /** 123 * The default value for fastopen queue length (currently GNU/Linux only) 124 */ 125 #define MHD_TCP_FASTOPEN_DEF_QUEUE_LEN 64 126 127 /** 128 * Release any internally allocated pointers, then deallocate the settings. 
129 * @param s the pointer to the settings to release 130 */ 131 static void 132 dsettings_release (struct DaemonOptions *s) 133 { 134 /* Release starting from the last member */ 135 if (NULL != s->random_entropy.v_buf) 136 free (s->random_entropy.v_buf); 137 if (MHD_INVALID_SOCKET != s->listen_socket) 138 mhd_socket_close (s->listen_socket); 139 if (NULL != s->bind_sa.v_sa) 140 free (s->bind_sa.v_sa); 141 if (NULL != s->tls_cert_key.v_mem_cert) 142 free (s->tls_cert_key.v_mem_cert); 143 free (s); 144 } 145 146 147 /** 148 * Set basic daemon parameters that not require additional initialisation. 149 * Mostly copy such parameters from the settings object to the daemon object. 150 * @param d the daemon object 151 * @param s the user settings 152 * @return MHD_SC_OK on success, 153 * the error code otherwise 154 */ 155 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) 156 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 157 daemon_set_basic_settings (struct MHD_Daemon *restrict d, 158 struct DaemonOptions *restrict s) 159 { 160 mhd_constexpr uint_fast64_t max_timeout_ms_value = 1209600000u; 161 162 #ifdef MHD_SUPPORT_HTTP2 163 // TODO: make it configurable 164 d->http_cfg.http1x = true; 165 d->http_cfg.http2 = true; 166 #endif /* MHD_SUPPORT_HTTP2 */ 167 168 d->req_cfg.strictness = s->protocol_strict_level.v_sl; 169 170 #ifdef MHD_SUPPORT_COOKIES 171 d->req_cfg.disable_cookies = (MHD_NO != s->disable_cookies); 172 #endif 173 174 d->req_cfg.suppress_date = (MHD_NO != s->suppress_date_header); 175 176 d->conns.cfg.timeout_milsec = s->default_timeout_milsec; 177 if (max_timeout_ms_value < d->conns.cfg.timeout_milsec) 178 d->conns.cfg.timeout_milsec = max_timeout_ms_value; 179 180 d->conns.cfg.per_ip_limit = s->per_ip_limit; 181 182 return MHD_SC_OK; 183 } 184 185 186 /** 187 * Set the daemon work mode. 
 * This function also checks whether requested work mode is supported by
 * current build and whether work mode is compatible with requested events
 * polling technique.
 * @param d the daemon object
 * @param s the user settings
 * @return MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
daemon_set_work_mode (struct MHD_Daemon *restrict d,
                      struct DaemonOptions *restrict s)
{
  switch (s->work_mode.mode)
  {
  case MHD_WM_EXTERNAL_PERIODIC:
    d->wmode_int = mhd_WM_INT_INTERNAL_EVENTS_NO_THREADS;
    break;
  case MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL:
  case MHD_WM_EXTERNAL_EVENT_LOOP_CB_EDGE:
    /* External event loops deliver readiness via callbacks; a specific
       polling syscall choice would never be used, so it is a conflict. */
    if (MHD_SPS_AUTO != s->poll_syscall)
    {
      mhd_LOG_MSG ( \
        d, MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID, \
        "The requested work mode is not compatible with setting " \
        "socket polling syscall.");
      return MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID;
    }
    if (MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL == s->work_mode.mode)
      d->wmode_int = mhd_WM_INT_EXTERNAL_EVENTS_LEVEL;
    else
      d->wmode_int = mhd_WM_INT_EXTERNAL_EVENTS_EDGE;
    break;
  case MHD_WM_EXTERNAL_SINGLE_FD_WATCH:
    /* Single-FD watch is implemented on top of epoll only */
    if ((MHD_SPS_AUTO != s->poll_syscall) &&
        (MHD_SPS_EPOLL != s->poll_syscall))
    {
      mhd_LOG_MSG ( \
        d, MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID, \
        "The requested work mode MHD_WM_EXTERNAL_SINGLE_FD_WATCH " \
        "is not compatible with requested socket polling syscall.");
      return MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID;
    }
#ifndef MHD_SUPPORT_EPOLL
    mhd_LOG_MSG ( \
      d, MHD_SC_FEATURE_DISABLED, \
      "The epoll is required for the requested work mode " \
      "MHD_WM_EXTERNAL_SINGLE_FD_WATCH, but not available on this " \
      "platform or MHD build.");
    return MHD_SC_FEATURE_DISABLED;
#else
    d->wmode_int = mhd_WM_INT_INTERNAL_EVENTS_NO_THREADS;
#endif
    break;
  case MHD_WM_THREAD_PER_CONNECTION:
    if (MHD_SPS_EPOLL == s->poll_syscall)
    {
      mhd_LOG_MSG ( \
        d, MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID, \
        "The requested work mode MHD_WM_THREAD_PER_CONNECTION " \
        "is not compatible with 'epoll' sockets polling.");
      return MHD_SC_SYSCALL_WORK_MODE_COMBINATION_INVALID;
    }
    mhd_FALLTHROUGH;
  /* Intentional fallthrough */
  case MHD_WM_WORKER_THREADS:
#ifndef MHD_SUPPORT_THREADS
    mhd_LOG_MSG (d, MHD_SC_FEATURE_DISABLED, \
                 "The internal threads modes are not supported by this " \
                 "build of MHD.");
    return MHD_SC_FEATURE_DISABLED;
#else /* MHD_SUPPORT_THREADS */
    if (MHD_WM_THREAD_PER_CONNECTION == s->work_mode.mode)
      d->wmode_int = mhd_WM_INT_INTERNAL_EVENTS_THREAD_PER_CONNECTION;
    else if (1 >= s->work_mode.params.num_worker_threads)
      /* && (MHD_WM_WORKER_THREADS == s->work_mode.mode) */
      d->wmode_int = mhd_WM_INT_INTERNAL_EVENTS_ONE_THREAD;
    else
      d->wmode_int = mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL;
#endif /* MHD_SUPPORT_THREADS */
    break;
  default:
    mhd_LOG_MSG (d, MHD_SC_CONFIGURATION_UNEXPECTED_WM, \
                 "Wrong requested work mode.");
    return MHD_SC_CONFIGURATION_UNEXPECTED_WM;
  }

  /* MHD_D_O_REREGISTER_ALL only makes sense when the application owns
     the event registration (external events modes) */
  if ((mhd_WM_INT_EXTERNAL_EVENTS_LEVEL != d->wmode_int) &&
      (mhd_WM_INT_EXTERNAL_EVENTS_EDGE != d->wmode_int) &&
      (MHD_NO != s->reregister_all))
  {
    mhd_LOG_MSG ( \
      d, \
      MHD_SC_EXTERNAL_EVENT_ONLY, \
      "The MHD_D_O_REREGISTER_ALL option can be used only with external " \
      "events work modes.");
    return MHD_SC_EXTERNAL_EVENT_ONLY;
  }

  return MHD_SC_OK;
}


/**
 * Any-type socket address, large enough for any supported address family.
 */
union mhd_SockaddrAny
{
  struct sockaddr sa;
  struct sockaddr_in sa_i4;
#ifdef HAVE_INET6
  struct sockaddr_in6 sa_i6;
#endif /* HAVE_INET6 */
  struct sockaddr_storage sa_stor;
};


/**
 * The type of the socket to create.
 * Values at or above #mhd_SKT_NO_SOCKET deliberately mirror the public
 * MHD_AF_* values so the two can be compared/assigned directly.
 */
enum mhd_CreateSktType
{
  /**
   * Unknown address family (could be IP or not IP)
   */
  mhd_SKT_UNKNOWN = -4
  ,
  /**
   * The socket is not IP.
   */
  mhd_SKT_NON_IP = -2
  ,
  /**
   * The socket is UNIX.
   */
  mhd_SKT_UNIX = -1
  ,
  /**
   * No socket
   */
  mhd_SKT_NO_SOCKET = MHD_AF_NONE
  ,
  /**
   * IPv4 only
   */
  mhd_SKT_IP_V4_ONLY = MHD_AF_INET4
  ,
  /**
   * IPv6 only
   */
  mhd_SKT_IP_V6_ONLY = MHD_AF_INET6
  ,
  /**
   * IPv6 with dual stack enabled
   */
  mhd_SKT_IP_DUAL_REQUIRED = MHD_AF_DUAL
  ,
  /**
   * Try IPv6 with dual stack then IPv4
   */
  mhd_SKT_IP_V4_WITH_V6_OPT = MHD_AF_DUAL_v6_OPTIONAL
  ,
  /**
   * IPv6 with optional dual stack
   */
  mhd_SKT_IP_V6_WITH_V4_OPT = MHD_AF_DUAL_v4_OPTIONAL
  ,
  /**
   * Try IPv4 then IPv6 with optional dual stack
   */
  mhd_SKT_IP_V4_WITH_FALLBACK = 16
};

/**
 * Create socket, bind to the address and start listening on the socket.
 *
 * The socket is assigned to the daemon as listening FD.
 * @param d the daemon to use
 * @param s the user settings
 * @param v6_tried true if IPv6 has been tried already
 * @param force_v6_any_dual true if IPv6 is forced with dual stack either
 *                          enabled or not
 * @param prev_bnd_lstn_err if this function was already tried with another and
 *                          failed to bind or to start listening then
 *                          this parameter must be set to respecting status
 *                          code, otherwise this parameter must be #MHD_SC_OK
 * @return #MHD_SC_OK on success,
 *         the error code otherwise (no error printed to log if result is
 *         #MHD_SC_LISTEN_SOCKET_BIND_FAILED or #MHD_SC_LISTEN_FAILURE)
 */
static enum MHD_StatusCode
create_bind_listen_stream_socket_inner (struct MHD_Daemon *restrict d,
                                        struct DaemonOptions *restrict s,
                                        bool v6_tried,
                                        bool force_v6_any_dual,
                                        enum MHD_StatusCode prev_bnd_lstn_err)
{
  MHD_Socket sk;
  enum mhd_CreateSktType sk_type;
  bool sk_already_listening;
  union mhd_SockaddrAny sa_all;
  const struct sockaddr *p_use_sa;
  socklen_t use_sa_size;
  uint_least16_t sk_port;
  bool is_non_block;
  bool is_non_inhr;
  enum MHD_StatusCode ret;

  sk = MHD_INVALID_SOCKET;
  sk_type = mhd_SKT_NO_SOCKET;
  sk_already_listening = false;
  p_use_sa = NULL;
  use_sa_size = 0;
  sk_port = 0;

#ifndef HAVE_INET6
  mhd_assert (! v6_tried);
  mhd_assert (! force_v6_any_dual);
#endif
  mhd_assert (mhd_SKT_NO_SOCKET == sk_type); /* Mute analyser warning */

  /* Three mutually-exclusive configuration sources:
     1) a pre-created listen socket supplied by the application,
     2) an explicit sockaddr (MHD_D_O_BIND_SA) or port (MHD_D_O_BIND_PORT),
     3) none of the above: the daemon runs without a listen socket. */
  if (MHD_INVALID_SOCKET != s->listen_socket)
  {
    mhd_assert (! v6_tried);
    mhd_assert (! force_v6_any_dual);
    /* Check for options conflicts */
    if (0 != s->bind_sa.v_sa_len)
    {
      mhd_LOG_MSG (d, MHD_SC_OPTIONS_CONFLICT, \
                   "MHD_D_O_BIND_SA cannot be used together " \
                   "with MHD_D_O_LISTEN_SOCKET");
      return MHD_SC_OPTIONS_CONFLICT;
    }
    else if (MHD_AF_NONE != s->bind_port.v_af)
    {
      mhd_LOG_MSG (d, MHD_SC_OPTIONS_CONFLICT, \
                   "MHD_D_O_BIND_PORT cannot be used together " \
                   "with MHD_D_O_LISTEN_SOCKET");
      return MHD_SC_OPTIONS_CONFLICT;
    }

    /* No options conflicts */
    sk = s->listen_socket;
    s->listen_socket = MHD_INVALID_SOCKET; /* Prevent closing with settings cleanup */
    sk_type = mhd_SKT_UNKNOWN;
    sk_already_listening = true;
  }
  else if ((0 != s->bind_sa.v_sa_len) || (MHD_AF_NONE != s->bind_port.v_af))
  {
    if (0 != s->bind_sa.v_sa_len)
    {
      mhd_assert (! v6_tried);
      mhd_assert (! force_v6_any_dual);

      /* Check for options conflicts */
      if (MHD_AF_NONE != s->bind_port.v_af)
      {
        mhd_LOG_MSG (d, MHD_SC_OPTIONS_CONFLICT, \
                     "MHD_D_O_BIND_SA cannot be used together " \
                     "with MHD_D_O_BIND_PORT");
        return MHD_SC_OPTIONS_CONFLICT;
      }

      /* No options conflicts.
         For known IP families the sockaddr is copied into 'sa_all' so the
         embedded 'sin_len'/'sin6_len' (where present) can be normalised. */
      switch (s->bind_sa.v_sa->sa_family)
      {
      case AF_INET:
        sk_type = mhd_SKT_IP_V4_ONLY;
        if (sizeof(sa_all.sa_i4) > s->bind_sa.v_sa_len)
        {
          mhd_LOG_MSG (d, MHD_SC_CONFIGURATION_WRONG_SA_SIZE, \
                       "The size of the provided sockaddr does not match "
                       "used address family");
          return MHD_SC_CONFIGURATION_WRONG_SA_SIZE;
        }
        memcpy (&(sa_all.sa_i4), s->bind_sa.v_sa, sizeof(sa_all.sa_i4));
        sk_port = (uint_least16_t) ntohs (sa_all.sa_i4.sin_port);
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
        mhd_assert (sizeof(sa_all.sa_i4) == (uint8_t) sizeof(sa_all.sa_i4));
        sa_all.sa_i4.sin_len = (uint8_t) sizeof(sa_all.sa_i4);
#endif
        p_use_sa = (struct sockaddr *) &(sa_all.sa_i4);
        use_sa_size = (socklen_t) sizeof(sa_all.sa_i4);
        break;
#ifdef HAVE_INET6
      case AF_INET6:
        sk_type = mhd_SKT_IP_V6_ONLY;
        if (sizeof(sa_all.sa_i6) > s->bind_sa.v_sa_len)
        {
          mhd_LOG_MSG (d, MHD_SC_CONFIGURATION_WRONG_SA_SIZE, \
                       "The size of the provided sockaddr does not match "
                       "used address family");
          return MHD_SC_CONFIGURATION_WRONG_SA_SIZE;
        }
        memcpy (&(sa_all.sa_i6), s->bind_sa.v_sa, s->bind_sa.v_sa_len);
        sk_port = (uint_least16_t) ntohs (sa_all.sa_i6.sin6_port);
#ifdef HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN
        mhd_assert (sizeof(sa_all.sa_i6) == (uint8_t) sizeof(sa_all.sa_i6));
        sa_all.sa_i6.sin6_len = (uint8_t) sizeof(sa_all.sa_i6);
#endif
        p_use_sa = (struct sockaddr *) &(sa_all.sa_i6);
        use_sa_size = (socklen_t) sizeof(sa_all.sa_i6);
        break;
#endif /* HAVE_INET6 */
#ifdef MHD_AF_UNIX
      case MHD_AF_UNIX:
        sk_type = mhd_SKT_UNIX;
        p_use_sa = NULL; /* To be set below */
        break;
#endif /* MHD_AF_UNIX */
      default:
        sk_type = mhd_SKT_UNKNOWN;
        p_use_sa = NULL; /* To be set below */
        break;
      }

      if (s->bind_sa.v_dual)
      {
        if (mhd_SKT_IP_V6_ONLY != sk_type)
        {
          /* Dual stack requested but the sockaddr is not IPv6: warn only,
             continue with the detected type */
          mhd_LOG_MSG (d, MHD_SC_LISTEN_DUAL_STACK_NOT_SUITABLE, \
                       "IP dual stack is not possible for provided sockaddr");
        }
#ifdef HAVE_INET6
        else
        {
#ifdef HAVE_DCLR_IPV6_V6ONLY
          sk_type = mhd_SKT_IP_DUAL_REQUIRED;
#else /* ! IPV6_V6ONLY */
          mhd_LOG_MSG (d, \
                       MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_NOT_SUPPORTED, \
                       "IP dual stack is not supported by this platform or " \
                       "by this MHD build");
#endif /* ! IPV6_V6ONLY */
        }
#endif /* HAVE_INET6 */
      }

      if (NULL == p_use_sa)
      {
        /* Non-IP / unknown family: use the application's sockaddr as-is,
           optionally fixing a mismatched embedded 'sa_len' member */
#if defined(HAVE_STRUCT_SOCKADDR_SA_LEN) && \
        defined(HAVE_STRUCT_SOCKADDR_STORAGE_SS_LEN)
        if ((((size_t) s->bind_sa.v_sa->sa_len) != s->bind_sa.v_sa_len) &&
            (sizeof(sa_all) >= s->bind_sa.v_sa_len))
        {
          /* Fix embedded 'sa_len' member if possible */
          memcpy (&sa_all, s->bind_sa.v_sa, s->bind_sa.v_sa_len);
          mhd_assert (s->bind_sa.v_sa_len == (uint8_t) s->bind_sa.v_sa_len);
          sa_all.sa_stor.ss_len = (uint8_t) s->bind_sa.v_sa_len;
          p_use_sa = (const struct sockaddr *) &(sa_all.sa_stor);
        }
        else
#endif /* HAVE_STRUCT_SOCKADDR_SA_LEN && HAVE_STRUCT_SOCKADDR_STORAGE_SS_LEN */
        p_use_sa = s->bind_sa.v_sa;
        use_sa_size = (socklen_t) s->bind_sa.v_sa_len;
      }
    }
    else /* if (MHD_AF_NONE != s->bind_port.v_af) */
    {
      /* No options conflicts.
         Map the requested address family (plus the retry flags of this
         invocation) to the concrete socket type to try. */
      switch (s->bind_port.v_af)
      {
      case MHD_AF_NONE:
        mhd_assert (0);
        mhd_UNREACHABLE ();
        return MHD_SC_INTERNAL_ERROR;
      case MHD_AF_AUTO:
#ifdef HAVE_INET6
#ifdef HAVE_DCLR_IPV6_V6ONLY
        if (force_v6_any_dual)
          sk_type = mhd_SKT_IP_V6_WITH_V4_OPT;
        else if (v6_tried)
          sk_type = mhd_SKT_IP_V4_WITH_FALLBACK;
        else
          sk_type = mhd_SKT_IP_V4_WITH_V6_OPT;
#else /* ! IPV6_V6ONLY */
        mhd_assert (! v6_tried);
        if (force_v6_any_dual)
          sk_type = mhd_SKT_IP_V6_ONLY;
        else
          sk_type = mhd_SKT_IP_V4_WITH_FALLBACK;
#endif /* ! IPV6_V6ONLY */
#else /* ! HAVE_INET6 */
        sk_type = mhd_SKT_IP_V4_ONLY;
#endif /* ! HAVE_INET6 */
        break;
      case MHD_AF_INET4:
        mhd_assert (! v6_tried);
        mhd_assert (! force_v6_any_dual);
        sk_type = mhd_SKT_IP_V4_ONLY;
        break;
      case MHD_AF_INET6:
        mhd_assert (! v6_tried);
        mhd_assert (! force_v6_any_dual);
#ifdef HAVE_INET6
        sk_type = mhd_SKT_IP_V6_ONLY;
#else /* ! HAVE_INET6 */
        mhd_LOG_MSG (d, MHD_SC_IPV6_NOT_SUPPORTED_BY_BUILD, \
                     "IPv6 is not supported by this MHD build or " \
                     "by this platform");
        return MHD_SC_IPV6_NOT_SUPPORTED_BY_BUILD;
#endif /* ! HAVE_INET6 */
        break;
      case MHD_AF_DUAL:
        mhd_assert (! v6_tried);
        mhd_assert (! force_v6_any_dual);
#ifdef HAVE_INET6
#ifdef HAVE_DCLR_IPV6_V6ONLY
        sk_type = mhd_SKT_IP_DUAL_REQUIRED;
#else /* ! IPV6_V6ONLY */
        mhd_LOG_MSG (d,
                     MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_NOT_SUPPORTED, \
                     "IP dual stack is not supported by this platform or " \
                     "by this MHD build");
        sk_type = mhd_SKT_IP_V6_ONLY;
#endif /* ! IPV6_V6ONLY */
#else /* ! HAVE_INET6 */
        mhd_LOG_MSG (d, MHD_SC_IPV6_NOT_SUPPORTED_BY_BUILD, \
                     "IPv6 is not supported by this MHD build or " \
                     "by this platform");
        return MHD_SC_IPV6_NOT_SUPPORTED_BY_BUILD;
#endif /* ! HAVE_INET6 */
        break;
      case MHD_AF_DUAL_v4_OPTIONAL:
        mhd_assert (! v6_tried);
        mhd_assert (! force_v6_any_dual);
#ifdef HAVE_INET6
#ifdef HAVE_DCLR_IPV6_V6ONLY
        sk_type = mhd_SKT_IP_V6_WITH_V4_OPT;
#else /* ! IPV6_V6ONLY */
        sk_type = mhd_SKT_IP_V6_ONLY;
#endif /* ! IPV6_V6ONLY */
#else /* ! HAVE_INET6 */
        mhd_LOG_MSG (d, MHD_SC_IPV6_NOT_SUPPORTED_BY_BUILD, \
                     "IPv6 is not supported by this MHD build or " \
                     "by this platform");
        return MHD_SC_IPV6_NOT_SUPPORTED_BY_BUILD;
#endif /* ! HAVE_INET6 */
        break;
      case MHD_AF_DUAL_v6_OPTIONAL:
        mhd_assert (! force_v6_any_dual);
#ifdef HAVE_INET6
#ifdef HAVE_DCLR_IPV6_V6ONLY
        sk_type = (! v6_tried) ?
                  mhd_SKT_IP_V4_WITH_V6_OPT : mhd_SKT_IP_V4_ONLY;
#else /* ! IPV6_V6ONLY */
        mhd_assert (! v6_tried);
        sk_type = mhd_SKT_IP_V4_ONLY;
#endif /* ! IPV6_V6ONLY */
#else /* ! HAVE_INET6 */
        mhd_assert (! v6_tried);
        sk_type = mhd_SKT_IP_V4_ONLY;
#endif /* ! HAVE_INET6 */
        break;
      default:
        mhd_LOG_MSG (d, MHD_SC_AF_NOT_SUPPORTED_BY_BUILD, \
                     "Unknown address family specified");
        return MHD_SC_AF_NOT_SUPPORTED_BY_BUILD;
      }

      mhd_assert (mhd_SKT_NO_SOCKET < sk_type);

      /* Build the wildcard ("any") address for the chosen family */
      switch (sk_type)
      {
      case mhd_SKT_IP_V4_ONLY:
      case mhd_SKT_IP_V4_WITH_FALLBACK:
        /* Zeroing is not required, but may help on exotic platforms */
        memset (&(sa_all.sa_i4), 0, sizeof(sa_all.sa_i4));
        sa_all.sa_i4.sin_family = AF_INET;
        sa_all.sa_i4.sin_port = htons (s->bind_port.v_port);
        sa_all.sa_i4.sin_addr.s_addr = INADDR_ANY;
        if (0 != INADDR_ANY) /* Optimised at compile time */
          sa_all.sa_i4.sin_addr.s_addr = htonl (INADDR_ANY);
#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN
        sa_all.sa_i4.sin_len = (uint8_t) sizeof (sa_all.sa_i4);
#endif
        p_use_sa = (const struct sockaddr *) &(sa_all.sa_i4);
        use_sa_size = (socklen_t) sizeof (sa_all.sa_i4);
        break;
      case mhd_SKT_IP_V6_ONLY:
      case mhd_SKT_IP_DUAL_REQUIRED:
      case mhd_SKT_IP_V4_WITH_V6_OPT:
      case mhd_SKT_IP_V6_WITH_V4_OPT:
#ifdef HAVE_INET6
        if (1)
        {
#ifdef IN6ADDR_ANY_INIT
          static const struct in6_addr static_in6any = IN6ADDR_ANY_INIT;
#endif
          /* Zeroing is required by POSIX */
          memset (&(sa_all.sa_i6), 0, sizeof(sa_all.sa_i6));
          sa_all.sa_i6.sin6_family = AF_INET6;
          sa_all.sa_i6.sin6_port = htons (s->bind_port.v_port);
#ifdef IN6ADDR_ANY_INIT /* Optional assignment at the address is all zeros anyway */
          sa_all.sa_i6.sin6_addr = static_in6any;
#endif
#ifdef HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN
          sa_all.sa_i6.sin6_len = (uint8_t) sizeof (sa_all.sa_i6);
#endif
          p_use_sa = (const struct sockaddr *) &(sa_all.sa_i6);
          use_sa_size = (socklen_t) sizeof (sa_all.sa_i6);
        }
        break;
#endif /* HAVE_INET6 */
      case mhd_SKT_UNKNOWN:
      case mhd_SKT_NON_IP:
      case mhd_SKT_UNIX:
      case mhd_SKT_NO_SOCKET:
      default:
        mhd_UNREACHABLE ();
        return MHD_SC_INTERNAL_ERROR;
      }

      sk_port = s->bind_port.v_port;

    }
  }
  else
  {
    /* No listen socket */
    d->net.listen.fd = MHD_INVALID_SOCKET;
    d->net.listen.is_broken = false;
    d->net.listen.type = mhd_SOCKET_TYPE_UNKNOWN;
    d->net.listen.non_block = false;
    d->net.listen.port = 0;

    return MHD_SC_OK;
  }

  mhd_assert (mhd_SKT_NO_SOCKET != sk_type);
  mhd_assert ((NULL != p_use_sa) || sk_already_listening);
  mhd_assert ((MHD_INVALID_SOCKET == sk) || sk_already_listening);

  if (MHD_INVALID_SOCKET == sk)
  {
    mhd_assert (NULL != p_use_sa);
#if defined(MHD_SOCKETS_KIND_WINSOCK) && defined(WSA_FLAG_NO_HANDLE_INHERIT)
    /* May fail before Win7 SP1 */
    sk = WSASocketW (p_use_sa->sa_family, SOCK_STREAM, 0,
                     NULL, 0, WSA_FLAG_OVERLAPPED | WSA_FLAG_NO_HANDLE_INHERIT);

    if (MHD_INVALID_SOCKET == sk)
#endif /* MHD_SOCKETS_KIND_WINSOCK && WSA_FLAG_NO_HANDLE_INHERIT */
    sk = socket (p_use_sa->sa_family,
                 SOCK_STREAM | mhd_SOCK_NONBLOCK
                 | mhd_SOCK_CLOEXEC | mhd_SOCK_NOSIGPIPE, 0);

    if (MHD_INVALID_SOCKET == sk)
    {
#ifdef HAVE_INET6
      /* IPv6 failed and the type allows a fallback: retry recursively with
         adjusted flags (each retry narrows the options, so recursion is
         bounded) */
      if (mhd_SKT_IP_V4_WITH_FALLBACK == sk_type)
        return create_bind_listen_stream_socket_inner (d,
                                                       s,
                                                       v6_tried,
                                                       true,
                                                       prev_bnd_lstn_err);
      if (mhd_SKT_IP_V4_WITH_V6_OPT == sk_type)
        return create_bind_listen_stream_socket_inner (d,
                                                       s,
                                                       true,
                                                       false,
                                                       prev_bnd_lstn_err);
#endif /* HAVE_INET6 */

      if (MHD_SC_OK != prev_bnd_lstn_err)
        return prev_bnd_lstn_err;

      if (mhd_SCKT_LERR_IS_AF ())
      {
        mhd_LOG_MSG (d, MHD_SC_AF_NOT_AVAILABLE, \
                     "The requested socket address family is rejected " \
                     "by the OS");
        return MHD_SC_AF_NOT_AVAILABLE;
      }
      mhd_LOG_MSG (d, MHD_SC_FAILED_TO_OPEN_LISTEN_SOCKET, \
                   "Failed to open listen socket");

      return MHD_SC_FAILED_TO_OPEN_LISTEN_SOCKET;
    }
    /* If these flags are non-zero constants, the socket() call above
       already applied the respective properties atomically */
    is_non_block = (0 != mhd_SOCK_NONBLOCK);
    is_non_inhr = (0 != mhd_SOCK_CLOEXEC);
  }
  else
  {
    is_non_block = false; /* Try to set non-block */
    is_non_inhr = false; /* Try to set non-inheritable */
  }

  /* The listen socket must be closed if error code returned
     beyond this point */

  ret = MHD_SC_OK;

  do
  { /* The scope for automatic socket close for error returns */
    if (! mhd_FD_FITS_DAEMON (d,sk))
    {
      mhd_LOG_MSG (d, MHD_SC_LISTEN_FD_OUTSIDE_OF_SET_RANGE, \
                   "The listen FD value is higher than allowed");
      ret = MHD_SC_LISTEN_FD_OUTSIDE_OF_SET_RANGE;
      break;
    }

    if (! is_non_inhr)
    {
      /* Best-effort: failure to set close-on-exec is logged, not fatal */
      if (! mhd_socket_noninheritable (sk))
        mhd_LOG_MSG (d, MHD_SC_LISTEN_SOCKET_NOINHERIT_FAILED, \
                     "OS refused to make the listen socket non-inheritable");
    }

    if (! sk_already_listening)
    {
#ifdef HAVE_INET6
#ifdef HAVE_DCLR_IPV6_V6ONLY
      if ((mhd_SKT_IP_V6_ONLY == sk_type) ||
          (mhd_SKT_IP_DUAL_REQUIRED == sk_type) ||
          (mhd_SKT_IP_V4_WITH_V6_OPT == sk_type) ||
          (mhd_SKT_IP_V6_WITH_V4_OPT == sk_type) ||
          (mhd_SKT_UNKNOWN == sk_type))
      {
        mhd_SCKT_OPT_BOOL no_dual_to_set;
        bool use_dual;

        use_dual = ((mhd_SKT_IP_DUAL_REQUIRED == sk_type) ||
                    (mhd_SKT_IP_V4_WITH_V6_OPT == sk_type) ||
                    (mhd_SKT_IP_V6_WITH_V4_OPT == sk_type));
        no_dual_to_set = use_dual ? 0 : 1;

        if (0 != mhd_setsockopt (sk, IPPROTO_IPV6, IPV6_V6ONLY,
                                 (void *) &no_dual_to_set,
                                 sizeof (no_dual_to_set)))
        {
          mhd_SCKT_OPT_BOOL no_dual_current;
          socklen_t opt_size;
          bool state_unknown;
          bool state_match;

          no_dual_current = 0;
          opt_size = sizeof(no_dual_current);

          /* Some platforms forbid setting this options, but allow
             reading. */
          if ((0 != mhd_getsockopt (sk, IPPROTO_IPV6, IPV6_V6ONLY,
                                    (void*) &no_dual_current, &opt_size))
              || (((socklen_t) sizeof(no_dual_current)) < opt_size))
          {
            state_unknown = true;
            state_match = false;
          }
          else
          {
            state_unknown = false;
            state_match = ((! ! no_dual_current) == (! ! no_dual_to_set));
          }

          if (state_unknown || ! state_match)
          {
            if (mhd_SKT_IP_V4_WITH_V6_OPT == sk_type)
            {
              /* The IPv6 attempt cannot be configured as requested:
                 close it and retry with plain IPv4 */
              (void) mhd_socket_close (sk);
              return create_bind_listen_stream_socket_inner (d,
                                                             s,
                                                             true,
                                                             false,
                                                             prev_bnd_lstn_err);
            }
            if (! state_unknown)
            {
              /* The dual-stack state is definitely wrong */
              if (mhd_SKT_IP_V6_ONLY == sk_type)
              {
                mhd_LOG_MSG ( \
                  d, MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_REJECTED, \
                  "Failed to disable IP dual-stack configuration " \
                  "for the listen socket");
                ret = (MHD_SC_OK == prev_bnd_lstn_err) ?
                      MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_REJECTED :
                      prev_bnd_lstn_err;
                break;
              }
              else if (mhd_SKT_UNKNOWN != sk_type)
              {
                mhd_LOG_MSG ( \
                  d, MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_REJECTED, \
                  "Cannot enable IP dual-stack configuration " \
                  "for the listen socket");
                if (mhd_SKT_IP_DUAL_REQUIRED == sk_type)
                {
                  ret = (MHD_SC_OK == prev_bnd_lstn_err) ?
                        MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_REJECTED :
                        prev_bnd_lstn_err;
                  break;
                }
              }
            }
            else
            {
              /* The dual-stack state is unknown */
              if (mhd_SKT_UNKNOWN != sk_type)
                mhd_LOG_MSG (
                  d, MHD_SC_LISTEN_DUAL_STACK_CONFIGURATION_UNKNOWN, \
                  "Failed to set dual-stack (IPV6_ONLY) configuration " \
                  "for the listen socket, using system defaults");
            }
          }
        }
      }
#else /* ! IPV6_V6ONLY */
      mhd_assert (mhd_SKT_IP_DUAL_REQUIRED != sk_type);
      mhd_assert (mhd_SKT_IP_V4_WITH_V6_OPT != sk_type);
      mhd_assert (mhd_SKT_IP_V6_WITH_V4_OPT != sk_type);
#endif /* ! IPV6_V6ONLY */
#endif /* HAVE_INET6 */

      if (MHD_FOM_AUTO <= d->settings->tcp_fastopen.v_option)
      {
#if defined(HAVE_DCLR_TCP_FASTOPEN)
        int fo_param;
#ifdef __linux__
        /* The parameter is the queue length */
        fo_param = (int) d->settings->tcp_fastopen.v_queue_length;
        if (0 == fo_param)
          fo_param = MHD_TCP_FASTOPEN_DEF_QUEUE_LEN;
#else /* ! __linux__ */
        fo_param = 1; /* The parameter is on/off type of setting */
#endif /* ! __linux__ */
        if (0 != mhd_setsockopt (sk, IPPROTO_TCP, TCP_FASTOPEN,
                                 (const void *) &fo_param,
                                 sizeof (fo_param)))
        {
          mhd_LOG_MSG (d, MHD_SC_LISTEN_FAST_OPEN_FAILURE, \
                       "OS refused to enable TCP Fast Open on " \
                       "the listen socket");
          /* Only fatal when fast open was explicitly required
             (values above MHD_FOM_AUTO) */
          if (MHD_FOM_AUTO < d->settings->tcp_fastopen.v_option)
          {
            ret = MHD_SC_LISTEN_FAST_OPEN_FAILURE;
            break;
          }
        }
#else /* ! TCP_FASTOPEN */
        if (MHD_FOM_AUTO < d->settings->tcp_fastopen.v_option)
        {
          mhd_LOG_MSG (d, MHD_SC_LISTEN_FAST_OPEN_FAILURE, \
                       "The OS does not support TCP Fast Open");
          ret = MHD_SC_LISTEN_FAST_OPEN_FAILURE;
          break;
        }
#endif
      }

      if (MHD_D_OPTION_BIND_TYPE_NOT_SHARED >= d->settings->listen_addr_reuse)
      {
#ifndef MHD_SOCKETS_KIND_WINSOCK
#ifdef HAVE_DCLR_SO_REUSEADDR
        mhd_SCKT_OPT_BOOL on_val1 = 1;
        if (0 != mhd_setsockopt (sk, SOL_SOCKET, SO_REUSEADDR,
                                 (const void *) &on_val1, sizeof (on_val1)))
        {
          mhd_LOG_MSG (d, MHD_SC_LISTEN_PORT_REUSE_ENABLE_FAILED, \
                       "OS refused to enable address reuse on " \
                       "the listen socket");
        }
#else /* ! SO_REUSEADDR */
        mhd_LOG_MSG (d, MHD_SC_LISTEN_ADDRESS_REUSE_ENABLE_NOT_SUPPORTED, \
                     "The OS does not support address reuse for sockets");
#endif /* ! SO_REUSEADDR */
#endif /* ! MHD_SOCKETS_KIND_WINSOCK */
        if (MHD_D_OPTION_BIND_TYPE_NOT_SHARED > d->settings->listen_addr_reuse)
        {
#if defined(HAVE_DCLR_SO_REUSEPORT) || defined(MHD_SOCKETS_KIND_WINSOCK)
          int opt_name;
          mhd_SCKT_OPT_BOOL on_val2 = 1;
#ifndef MHD_SOCKETS_KIND_WINSOCK
          opt_name = SO_REUSEPORT;
#else /* ! MHD_SOCKETS_KIND_WINSOCK */
          opt_name = SO_REUSEADDR; /* On W32 it is the same as SO_REUSEPORT on other platforms */
#endif /* ! MHD_SOCKETS_KIND_WINSOCK */
          if (0 != mhd_setsockopt (sk, \
                                   SOL_SOCKET, \
                                   opt_name, \
                                   (const void *) &on_val2, \
                                   sizeof (on_val2)))
          {
            mhd_LOG_MSG (d, MHD_SC_LISTEN_ADDRESS_REUSE_ENABLE_FAILED, \
                         "OS refused to enable address sharing " \
                         "on the listen socket");
            ret = MHD_SC_LISTEN_ADDRESS_REUSE_ENABLE_FAILED;
            break;
          }
#else /* ! SO_REUSEADDR && ! MHD_SOCKETS_KIND_WINSOCK */
          mhd_LOG_MSG (d, MHD_SC_LISTEN_ADDRESS_REUSE_ENABLE_NOT_SUPPORTED, \
                       "The OS does not support address sharing for sockets");
          ret = MHD_SC_LISTEN_ADDRESS_REUSE_ENABLE_NOT_SUPPORTED;
          break;
#endif /* ! SO_REUSEADDR && ! MHD_SOCKETS_KIND_WINSOCK */
        }
      }
#if defined(SO_EXCLUSIVEADDRUSE) || defined(SO_EXCLBIND)
      else if (MHD_D_OPTION_BIND_TYPE_EXCLUSIVE <=
               d->settings->listen_addr_reuse)
      {
        int opt_name;
        mhd_SCKT_OPT_BOOL on_val = 1;
#ifdef SO_EXCLUSIVEADDRUSE
        opt_name = SO_EXCLUSIVEADDRUSE;
#else
        opt_name = SO_EXCLBIND;
#endif
        if (0 != mhd_setsockopt (sk, \
                                 SOL_SOCKET, \
                                 opt_name, \
                                 (const void *) &on_val, \
                                 sizeof (on_val)))
        {
          mhd_LOG_MSG (d, MHD_SC_LISTEN_ADDRESS_EXCLUSIVE_ENABLE_FAILED, \
                       "OS refused to enable exclusive address use " \
                       "on the listen socket");
          ret = MHD_SC_LISTEN_ADDRESS_EXCLUSIVE_ENABLE_FAILED;
          break;
        }
      }
#endif /* SO_EXCLUSIVEADDRUSE || SO_EXCLBIND */

      mhd_assert (NULL != p_use_sa);
      mhd_assert (0 != use_sa_size);
      if (0 != bind (sk, p_use_sa, use_sa_size))
      {
        ret = (MHD_SC_OK == prev_bnd_lstn_err) ?
              MHD_SC_LISTEN_SOCKET_BIND_FAILED : prev_bnd_lstn_err;
#ifdef HAVE_INET6
        /* Retry with an alternative family; carry this bind error code so
           the final failure (if any) reports the first cause */
        if (mhd_SKT_IP_V4_WITH_FALLBACK == sk_type)
        {
          (void) mhd_socket_close (sk);
          return create_bind_listen_stream_socket_inner (d,
                                                         s,
                                                         v6_tried,
                                                         true,
                                                         ret);
        }
        if (mhd_SKT_IP_V4_WITH_V6_OPT == sk_type)
        {
          (void) mhd_socket_close (sk);
          return create_bind_listen_stream_socket_inner (d,
                                                         s,
                                                         true,
                                                         false,
                                                         ret);
        }
#endif /* HAVE_INET6 */
        break;
      }

      if (1)
      {
        int accept_queue_len;
        accept_queue_len = (int) s->listen_backlog;
        if (0 > accept_queue_len)
          accept_queue_len = 0;
        if (0 == accept_queue_len)
        {
#if defined(SOMAXCONN) || defined(HAVE_DCLR_SOMAXCONN)
          accept_queue_len = SOMAXCONN;
#else /* ! SOMAXCONN */
          accept_queue_len = 127; /* Should be the safe value */
#endif /* ! SOMAXCONN */
        }
        if (0 != listen (sk, accept_queue_len))
        {
          ret = MHD_SC_LISTEN_FAILURE;
#ifdef HAVE_INET6
          if (mhd_SKT_IP_V4_WITH_FALLBACK == sk_type)
          {
            (void) mhd_socket_close (sk);
            return create_bind_listen_stream_socket_inner (d,
                                                           s,
                                                           v6_tried,
                                                           true,
                                                           ret);
          }
          if (mhd_SKT_IP_V4_WITH_V6_OPT == sk_type)
          {
            (void) mhd_socket_close (sk);
            return create_bind_listen_stream_socket_inner (d,
                                                           s,
                                                           true,
                                                           false,
                                                           ret);
          }
#endif /* HAVE_INET6 */
          break;
        }
      }
    }
    /* A valid listening socket is ready here */

    if (! is_non_block)
    {
      is_non_block = mhd_socket_nonblocking (sk);
      if (! is_non_block)
        mhd_LOG_MSG (d, MHD_SC_LISTEN_SOCKET_NONBLOCKING_FAILURE, \
                     "OS refused to make the listen socket non-blocking");
    }

    /* Set to the daemon only when the listening socket is fully ready */
    d->net.listen.fd = sk;
    d->net.listen.is_broken = false;
    switch (sk_type)
    {
    case mhd_SKT_UNKNOWN:
      d->net.listen.type = mhd_SOCKET_TYPE_UNKNOWN;
      break;
    case mhd_SKT_NON_IP:
      d->net.listen.type = mhd_SOCKET_TYPE_NON_IP;
      break;
    case mhd_SKT_UNIX:
      d->net.listen.type = mhd_SOCKET_TYPE_UNIX;
      break;
    case mhd_SKT_IP_V4_ONLY:
    case mhd_SKT_IP_V6_ONLY:
    case mhd_SKT_IP_DUAL_REQUIRED:
    case mhd_SKT_IP_V4_WITH_V6_OPT:
    case mhd_SKT_IP_V6_WITH_V4_OPT:
    case mhd_SKT_IP_V4_WITH_FALLBACK:
      d->net.listen.type = mhd_SOCKET_TYPE_IP;
      break;
    case mhd_SKT_NO_SOCKET:
    default:
      mhd_UNREACHABLE ();
      return MHD_SC_INTERNAL_ERROR;
    }
    d->net.listen.non_block = is_non_block;
    d->net.listen.port = sk_port;

    mhd_assert (ret == MHD_SC_OK);

    return MHD_SC_OK;

  } while (0);

  mhd_assert (MHD_SC_OK != ret); /* This should be only error returns here */
  mhd_assert (MHD_INVALID_SOCKET != sk);
  (void) mhd_socket_close (sk);
  return ret;
}


/**
 * Create socket, bind to the address and start listening on the socket.
 *
 * The socket is assigned to the daemon as listening FD.
 *
 * @param d the daemon to use
 * @param s the user settings
 * @return #MHD_SC_OK on success,
 *         the error code otherwise (no error printed to log if result is
 *         #MHD_SC_LISTEN_SOCKET_BIND_FAILED or #MHD_SC_LISTEN_FAILURE)
 */
static enum MHD_StatusCode
create_bind_listen_stream_socket (struct MHD_Daemon *restrict d,
                                  struct DaemonOptions *restrict s)
{
  enum MHD_StatusCode ret;

  ret = create_bind_listen_stream_socket_inner (d,
                                                s,
                                                false,
                                                false,
                                                MHD_SC_OK);
#ifdef MHD_SUPPORT_LOG_FUNCTIONALITY
  /* The inner helper deliberately does not log these two results
     (it may retry several address families); log them once here */
  if (MHD_SC_LISTEN_SOCKET_BIND_FAILED == ret)
    mhd_LOG_MSG (d, MHD_SC_LISTEN_SOCKET_BIND_FAILED, \
                 "Failed to bind the listen socket");
  else if (MHD_SC_LISTEN_FAILURE == ret)
    mhd_LOG_MSG (d, MHD_SC_LISTEN_FAILURE, \
                 "Failed to start listening on the listen socket");
#endif /* MHD_SUPPORT_LOG_FUNCTIONALITY */

  return ret;
}


#ifdef MHD_USE_GETSOCKNAME
/**
 * Detect and set the type and port of the listening socket
 * @param d the daemon to use
 */
static MHD_FN_PAR_NONNULL_ (1) void
detect_listen_type_and_port (struct MHD_Daemon *restrict d)
{
  union mhd_SockaddrAny sa_all;
  socklen_t sa_size;
  enum mhd_SocketType declared_type;

  mhd_assert (MHD_INVALID_SOCKET != d->net.listen.fd);
  mhd_assert (0 == d->net.listen.port);
  memset (&sa_all, 0, sizeof(sa_all)); /* Actually not required */
  sa_size = (socklen_t) sizeof(sa_all);

  if (0 != getsockname (d->net.listen.fd, &(sa_all.sa), &sa_size))
  {
    if (mhd_SOCKET_TYPE_IP == d->net.listen.type)
      mhd_LOG_MSG (d, MHD_SC_LISTEN_PORT_DETECT_FAILURE, \
                   "Failed to detect the port number on
the listening socket");
    return;
  }

  declared_type = d->net.listen.type;
  if (0 == sa_size)
  {
    /* Zero returned length: some platforms report this for unnamed
     * (e.g. AF_UNIX) sockets instead of filling the address */
#ifndef __linux__
    /* Used on some non-Linux platforms */
    d->net.listen.type = mhd_SOCKET_TYPE_UNIX;
    d->net.listen.port = 0;
#else  /* ! __linux__ */
    (void) 0;
#endif /* ! __linux__ */
  }
  else
  {
    /* Derive the socket type (and port, for IP) from the address family */
    switch (sa_all.sa.sa_family)
    {
    case AF_INET:
      d->net.listen.type = mhd_SOCKET_TYPE_IP;
      d->net.listen.port = (uint_least16_t) ntohs (sa_all.sa_i4.sin_port);
      break;
#ifdef HAVE_INET6
    case AF_INET6:
      d->net.listen.type = mhd_SOCKET_TYPE_IP;
      d->net.listen.port = (uint_least16_t) ntohs (sa_all.sa_i6.sin6_port);
      break;
#endif /* HAVE_INET6 */
#ifdef MHD_AF_UNIX
    case MHD_AF_UNIX:
      d->net.listen.type = mhd_SOCKET_TYPE_UNIX;
      d->net.listen.port = 0;
      break;
#endif /* MHD_AF_UNIX */
    default:
      d->net.listen.type = mhd_SOCKET_TYPE_UNKNOWN;
      d->net.listen.port = 0;
      break;
    }
  }

  /* Warn if a socket created as IP turned out to be something else */
  if ((declared_type != d->net.listen.type)
      && (mhd_SOCKET_TYPE_IP == declared_type))
    mhd_LOG_MSG (d, MHD_SC_UNEXPECTED_SOCKET_ERROR, \
                 "The type of listen socket is detected as non-IP, while " \
                 "the socket has been created as an IP socket");
}


#else
# define detect_listen_type_and_port(d) ((void) d)
#endif


#ifdef MHD_SUPPORT_EPOLL

/**
 * Initialise daemon's epoll FD
 * @param d the daemon to initialise the epoll FD for
 * @param log_failures if 'true' then creation/range failures are logged;
 *                     'false' is used when epoll is only being probed and
 *                     a fallback polling method is still possible
 * @return #MHD_SC_OK on success, the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) enum MHD_StatusCode
init_epoll (struct MHD_Daemon *restrict d,
            bool log_failures)
{
  int e_fd;
  mhd_assert (! mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int));
  mhd_assert ((mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type) || \
              ((mhd_POLL_TYPE_EPOLL == d->events.poll_type) && \
               (mhd_WM_INT_IS_THREAD_POOL (d->wmode_int))));
  mhd_assert ((! d->dbg.net_inited) || \
              (mhd_WM_INT_IS_THREAD_POOL (d->wmode_int)));
  mhd_assert ((mhd_POLL_TYPE_EPOLL != d->events.poll_type) || \
              (NULL == d->events.data.epoll.events));
  mhd_assert ((mhd_POLL_TYPE_EPOLL != d->events.poll_type) || \
              (MHD_INVALID_SOCKET == d->events.data.epoll.e_fd));
#ifdef HAVE_EPOLL_CREATE1
  /* epoll_create1() sets close-on-exec atomically */
  e_fd = epoll_create1 (EPOLL_CLOEXEC);
#else  /* ! HAVE_EPOLL_CREATE1 */
  e_fd = epoll_create (128); /* The number is usually ignored */
  if (0 <= e_fd)
  {
    /* Best-effort CLOEXEC emulation; failure is non-fatal */
    if (! mhd_socket_noninheritable (e_fd))
      mhd_LOG_MSG (d, MHD_SC_EPOLL_CTL_CONFIGURE_NOINHERIT_FAILED, \
                   "Failed to make epoll control FD non-inheritable");
  }
#endif /* ! HAVE_EPOLL_CREATE1 */
  if (0 > e_fd)
  {
    if (log_failures)
      mhd_LOG_MSG (d, MHD_SC_EPOLL_CTL_CREATE_FAILED, \
                   "Failed to create epoll control FD");
    return MHD_SC_EPOLL_CTL_CREATE_FAILED; /* Failure exit point */
  }

  if (! mhd_FD_FITS_DAEMON (d, e_fd))
  {
    if (log_failures)
      mhd_LOG_MSG (d, MHD_SC_EPOLL_CTL_OUTSIDE_OF_SET_RANGE, \
                   "The epoll control FD value is higher than allowed");
    (void) close (e_fd);
    return MHD_SC_EPOLL_CTL_OUTSIDE_OF_SET_RANGE; /* Failure exit point */
  }

  d->events.poll_type = mhd_POLL_TYPE_EPOLL;
  d->events.data.epoll.e_fd = e_fd;
  d->events.data.epoll.events = NULL; /* Memory allocated during event and threads init */
  d->events.data.epoll.num_elements = 0;
  return MHD_SC_OK; /* Success exit point */
}


/**
 * Deinitialise daemon's epoll FD
 * @param d the daemon to deinitialise the epoll FD for
 */
MHD_FN_PAR_NONNULL_ (1) static void
deinit_epoll (struct MHD_Daemon *restrict d)
{
  mhd_assert (mhd_POLL_TYPE_EPOLL == d->events.poll_type);
  /* With thread pool the epoll control FD could be migrated to the
   * first worker daemon. */
  mhd_assert ((MHD_INVALID_SOCKET != d->events.data.epoll.e_fd) || \
              (mhd_WM_INT_IS_THREAD_POOL (d->wmode_int)));
  mhd_assert ((MHD_INVALID_SOCKET != d->events.data.epoll.e_fd) || \
              (mhd_D_HAS_WORKERS (d)));
  if (MHD_INVALID_SOCKET != d->events.data.epoll.e_fd)
    close (d->events.data.epoll.e_fd);
}


#endif /* MHD_SUPPORT_EPOLL */

/**
 * Choose sockets monitoring syscall and pre-initialise it
 * @param d the daemon object
 * @param s the user settings
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) \
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
daemon_choose_and_preinit_events (struct MHD_Daemon *restrict d,
                                  struct DaemonOptions *restrict s)
{
  enum mhd_IntPollType chosen_type;

  mhd_assert (mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type);

  mhd_assert ((mhd_WM_INT_EXTERNAL_EVENTS_EDGE != d->wmode_int) || \
              (mhd_WM_INT_EXTERNAL_EVENTS_LEVEL != d->wmode_int) || \
              (MHD_SPS_AUTO == s->poll_syscall));

  mhd_assert ((mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type) || \
              (mhd_WM_INT_EXTERNAL_EVENTS_EDGE == d->wmode_int) || \
              (mhd_WM_INT_EXTERNAL_EVENTS_LEVEL == d->wmode_int) || \
              (MHD_WM_EXTERNAL_SINGLE_FD_WATCH == s->work_mode.mode));
  mhd_assert ((mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type) || \
              (d->events.poll_type == (enum mhd_IntPollType) s->poll_syscall) \
              || ((MHD_SPS_AUTO == s->poll_syscall) && \
                  ((mhd_POLL_TYPE_EXT == d->events.poll_type) || \
                   mhd_POLL_TYPE_INT_IS_EPOLL (d->events.poll_type))));

  /* Check whether the provided parameter is in the range of expected values.
     Reject unsupported or disabled values. */
  switch (s->poll_syscall)
  {
  case MHD_SPS_AUTO:
    chosen_type = mhd_POLL_TYPE_NOT_SET_YET;
    break;
  case MHD_SPS_SELECT:
    mhd_assert (!
mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
#ifndef MHD_SUPPORT_SELECT
    mhd_LOG_MSG (d, MHD_SC_SELECT_SYSCALL_NOT_AVAILABLE, \
                 "'select()' is not supported by the platform or " \
                 "this MHD build");
    return MHD_SC_SELECT_SYSCALL_NOT_AVAILABLE;
#else  /* MHD_SUPPORT_SELECT */
    chosen_type = mhd_POLL_TYPE_SELECT;
#endif /* MHD_SUPPORT_SELECT */
    break;
  case MHD_SPS_POLL:
    mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
#ifndef MHD_SUPPORT_POLL
    mhd_LOG_MSG (d, MHD_SC_POLL_SYSCALL_NOT_AVAILABLE, \
                 "'poll()' is not supported by the platform or " \
                 "this MHD build");
    return MHD_SC_POLL_SYSCALL_NOT_AVAILABLE;
#else  /* MHD_SUPPORT_POLL */
    chosen_type = mhd_POLL_TYPE_POLL;
#endif /* MHD_SUPPORT_POLL */
    break;
  case MHD_SPS_EPOLL:
    mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
#ifndef MHD_SUPPORT_EPOLL
    mhd_LOG_MSG (d, MHD_SC_EPOLL_SYSCALL_NOT_AVAILABLE, \
                 "'epoll' is not supported by the platform or " \
                 "this MHD build");
    return MHD_SC_EPOLL_SYSCALL_NOT_AVAILABLE;
#else  /* MHD_SUPPORT_EPOLL */
    chosen_type = mhd_POLL_TYPE_EPOLL;
#endif /* MHD_SUPPORT_EPOLL */
    break;
  default:
    mhd_LOG_MSG (d, MHD_SC_CONFIGURATION_UNEXPECTED_SPS,
                 "Wrong socket polling syscall specified");
    return MHD_SC_CONFIGURATION_UNEXPECTED_SPS;
  }

  mhd_assert (mhd_POLL_TYPE_EXT != chosen_type);

  /* External-events work modes use the application's event loop,
   * not an internal polling syscall */
  if (mhd_POLL_TYPE_NOT_SET_YET == chosen_type)
  {
    if (mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int))
      chosen_type = mhd_POLL_TYPE_EXT;
  }

#ifdef MHD_SUPPORT_EPOLL
  /* Try 'epoll' if needed or possible */
  if ((mhd_POLL_TYPE_NOT_SET_YET == chosen_type)
      || (mhd_POLL_TYPE_EPOLL == chosen_type))
  {
    bool epoll_required;
    bool epoll_allowed;

    /* epoll is mandatory when explicitly requested or when the
     * "single FD watch" mode needs one combined FD to hand out */
    epoll_required = false;
    if (mhd_POLL_TYPE_EPOLL == chosen_type)
    {
      mhd_assert (! mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int));
      epoll_required = true;
    }
    else if (MHD_WM_EXTERNAL_SINGLE_FD_WATCH == s->work_mode.mode)
    {
      mhd_assert (! mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int));
      epoll_required = true;
    }

    epoll_allowed = true;
    if (mhd_WM_INT_IS_THREAD_PER_CONN (d->wmode_int))
    {
      mhd_assert (! epoll_required);
      epoll_allowed = false;
    }
# ifdef MHD_SUPPORT_HTTPS
    else if (MHD_TLS_BACKEND_NONE != s->tls)
    {
      if (! epoll_required)
        epoll_allowed = mhd_tls_is_edge_trigg_supported (s);
      /* If 'epoll' is required, but TLS backend does not support it,
         then continue with 'epoll' here and fail at TLS initialisation. */
      /* TODO: fail here */
    }
# endif /* MHD_SUPPORT_HTTPS */

    mhd_assert (epoll_allowed || ! epoll_required);

    if (epoll_allowed)
    {
      enum MHD_StatusCode epoll_res;

      /* Log the failure only when there is no fallback */
      epoll_res = init_epoll (d,
                              epoll_required);
      if (MHD_SC_OK == epoll_res)
        chosen_type = mhd_POLL_TYPE_EPOLL;
      else
      {
        if (epoll_required)
          return epoll_res;
        mhd_assert (mhd_POLL_TYPE_NOT_SET_YET == chosen_type);
      }
    }
    else
      mhd_assert (mhd_POLL_TYPE_EPOLL != chosen_type);
  }
  mhd_assert ((mhd_POLL_TYPE_EPOLL != d->events.poll_type) || \
              (0 < d->events.data.epoll.e_fd));
  mhd_assert ((mhd_POLL_TYPE_EPOLL == d->events.poll_type) == \
              (mhd_POLL_TYPE_EPOLL == chosen_type));
#endif /* ! MHD_SUPPORT_EPOLL */

  /* Still undecided: fall back to poll(), then select() */
  if (mhd_POLL_TYPE_NOT_SET_YET == chosen_type)
  {
#if defined(MHD_SUPPORT_POLL)
    chosen_type = mhd_POLL_TYPE_POLL;
#elif defined(MHD_SUPPORT_SELECT)
    chosen_type = mhd_POLL_TYPE_SELECT;
#else
    mhd_LOG_MSG (d, MHD_SC_FEATURE_DISABLED, \
                 "All suitable internal sockets polling technologies are " \
                 "disabled in this MHD build");
    return MHD_SC_FEATURE_DISABLED;
#endif
  }

  /* Record the final choice and pre-initialise its per-type data.
   * Buffers themselves are allocated later by allocate_events(). */
  switch (chosen_type)
  {
  case mhd_POLL_TYPE_EXT:
    mhd_assert ((MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL == s->work_mode.mode) || \
                (MHD_WM_EXTERNAL_EVENT_LOOP_CB_EDGE == s->work_mode.mode));
    mhd_assert (mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
    d->events.poll_type = mhd_POLL_TYPE_EXT;
    d->events.data.extr.cb_data.cb =
      s->work_mode.params.v_external_event_loop_cb.reg_cb;
    d->events.data.extr.cb_data.cls =
      s->work_mode.params.v_external_event_loop_cb.reg_cb_cls;
    d->events.data.extr.reg_all = (MHD_NO != s->reregister_all);
#ifdef MHD_SUPPORT_THREADS
    d->events.data.extr.itc_data.app_cntx = NULL;
#endif /* MHD_SUPPORT_THREADS */
    d->events.data.extr.listen_data.app_cntx = NULL;
    break;
#ifdef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
    mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
    mhd_assert (MHD_WM_EXTERNAL_SINGLE_FD_WATCH != s->work_mode.mode);
    mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL != s->work_mode.mode);
    mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_EDGE != s->work_mode.mode);
    mhd_assert (MHD_NO == s->reregister_all);
    d->events.poll_type = mhd_POLL_TYPE_SELECT;
    d->events.data.select.rfds = NULL; /* Memory allocated during event and threads init */
    d->events.data.select.wfds = NULL; /* Memory allocated during event and threads init */
    d->events.data.select.efds = NULL; /* Memory allocated during event and threads init */
    break;
#endif /* MHD_SUPPORT_SELECT */
#ifdef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
    mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
    mhd_assert (MHD_WM_EXTERNAL_SINGLE_FD_WATCH != s->work_mode.mode);
    mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL != s->work_mode.mode);
    mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_EDGE != s->work_mode.mode);
    mhd_assert (MHD_NO == s->reregister_all);
    d->events.poll_type = mhd_POLL_TYPE_POLL;
    d->events.data.poll.fds = NULL; /* Memory allocated during event and threads init */
    d->events.data.poll.rel = NULL; /* Memory allocated during event and threads init */
    break;
#endif /* MHD_SUPPORT_POLL */
#ifdef MHD_SUPPORT_EPOLL
  case mhd_POLL_TYPE_EPOLL:
    mhd_assert (! mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
    mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_LEVEL != s->work_mode.mode);
    mhd_assert (MHD_WM_EXTERNAL_EVENT_LOOP_CB_EDGE != s->work_mode.mode);
    mhd_assert (MHD_NO == s->reregister_all);
    /* Pre-initialised by init_epoll() */
    mhd_assert (mhd_POLL_TYPE_EPOLL == d->events.poll_type);
    mhd_assert (0 <= d->events.data.epoll.e_fd);
    mhd_assert (NULL == d->events.data.epoll.events);
    break;
#endif /* MHD_SUPPORT_EPOLL */
#ifndef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
#endif /* ! MHD_SUPPORT_SELECT */
#ifndef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
#endif /* ! MHD_SUPPORT_POLL */
  case mhd_POLL_TYPE_NOT_SET_YET:
  default:
    mhd_UNREACHABLE ();
    return MHD_SC_INTERNAL_ERROR;
  }
  return MHD_SC_OK;
}


/**
 * Initialise network/sockets for the daemon.
 * Also choose events mode / sockets polling syscall.
 * @param d the daemon object
 * @param s the user settings
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) \
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
daemon_init_net (struct MHD_Daemon *restrict d,
                 struct DaemonOptions *restrict s)
{
  enum MHD_StatusCode ret;

  mhd_assert (! d->dbg.net_inited);
  mhd_assert (! d->dbg.net_deinited);
#ifdef MHD_SOCKETS_KIND_POSIX
  d->net.cfg.max_fd_num = s->fd_number_limit;
#endif /* MHD_SOCKETS_KIND_POSIX */

  ret = daemon_choose_and_preinit_events (d, s);
  if (MHD_SC_OK != ret)
    return ret;

  mhd_assert (mhd_POLL_TYPE_NOT_SET_YET != d->events.poll_type);

  /* No direct return of error codes is allowed beyond this point.
     Deinit/cleanup must be performed before return of any error.
*/ 1576 1577 #if defined(MHD_SOCKETS_KIND_POSIX) && defined(MHD_SUPPORT_SELECT) 1578 if (mhd_POLL_TYPE_SELECT == d->events.poll_type) 1579 { 1580 if ((MHD_INVALID_SOCKET == d->net.cfg.max_fd_num) || 1581 (FD_SETSIZE < d->net.cfg.max_fd_num)) 1582 d->net.cfg.max_fd_num = FD_SETSIZE; 1583 } 1584 #endif /* MHD_SOCKETS_KIND_POSIX && MHD_SUPPORT_SELECT */ 1585 1586 if (MHD_SC_OK == ret) 1587 { 1588 ret = create_bind_listen_stream_socket (d, s); 1589 1590 if (MHD_SC_OK == ret) 1591 { 1592 if ((MHD_INVALID_SOCKET != d->net.listen.fd) 1593 && ! d->net.listen.non_block 1594 && (mhd_D_IS_USING_EDGE_TRIG (d) || 1595 mhd_WM_INT_IS_THREAD_POOL (d->wmode_int))) 1596 { 1597 mhd_LOG_MSG (d, MHD_SC_LISTEN_SOCKET_NONBLOCKING_FAILURE, \ 1598 "The selected daemon work mode requires listening socket " 1599 "in non-blocking mode"); 1600 ret = MHD_SC_LISTEN_SOCKET_NONBLOCKING_FAILURE; 1601 } 1602 1603 if (MHD_SC_OK == ret) 1604 { 1605 if ((MHD_INVALID_SOCKET != d->net.listen.fd) && 1606 ((0 == d->net.listen.port) || 1607 (mhd_SOCKET_TYPE_UNKNOWN == d->net.listen.type))) 1608 detect_listen_type_and_port (d); 1609 1610 #ifndef NDEBUG 1611 d->dbg.net_inited = true; 1612 #endif 1613 return MHD_SC_OK; /* Success exit point */ 1614 } 1615 1616 /* Below is a cleanup path */ 1617 if (MHD_INVALID_SOCKET != d->net.listen.fd) 1618 mhd_socket_close (d->net.listen.fd); 1619 } 1620 } 1621 1622 #ifdef MHD_SUPPORT_EPOLL 1623 if ((mhd_POLL_TYPE_EPOLL == d->events.poll_type)) 1624 close (d->events.data.epoll.e_fd); 1625 #endif /* MHD_SUPPORT_EPOLL */ 1626 1627 mhd_assert (MHD_SC_OK != ret); 1628 1629 return ret; 1630 } 1631 1632 1633 /** 1634 * Deinitialise daemon's network data 1635 * @param d the daemon object 1636 */ 1637 MHD_FN_PAR_NONNULL_ (1) static void 1638 daemon_deinit_net (struct MHD_Daemon *restrict d) 1639 { 1640 mhd_assert (d->dbg.net_inited); 1641 mhd_assert (! 
d->dbg.net_deinited); 1642 mhd_assert (mhd_POLL_TYPE_NOT_SET_YET != d->events.poll_type); 1643 #ifdef MHD_SUPPORT_EPOLL 1644 if (mhd_POLL_TYPE_EPOLL == d->events.poll_type) 1645 deinit_epoll (d); 1646 #endif /* MHD_SUPPORT_EPOLL */ 1647 if (MHD_INVALID_SOCKET != d->net.listen.fd) 1648 mhd_socket_close (d->net.listen.fd); 1649 1650 #ifndef NDEBUG 1651 d->dbg.net_deinited = true; 1652 #endif 1653 } 1654 1655 1656 #if 0 1657 void 1658 dauth_init (struct MHD_Daemon *restrict d, 1659 struct DaemonOptions *restrict s) 1660 { 1661 mhd_assert ((NULL == s->random_entropy.v_buf) || \ 1662 (0 != s->random_entropy.v_buf_size)); 1663 mhd_assert ((0 == s->random_entropy.v_buf_size) || \ 1664 (NULL != s->random_entropy.v_buf)); 1665 } 1666 1667 1668 #endif 1669 1670 #ifdef MHD_SUPPORT_AUTH_DIGEST 1671 /** 1672 * Initialise daemon Digest Auth data 1673 * @param d the daemon object 1674 * @param s the user settings 1675 * @return #MHD_SC_OK on success, 1676 * the error code otherwise 1677 */ 1678 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) \ 1679 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 1680 daemon_init_auth_digest (struct MHD_Daemon *restrict d, 1681 struct DaemonOptions *restrict s) 1682 { 1683 enum MHD_StatusCode ret; 1684 size_t nonces_num; 1685 1686 if (0 == s->random_entropy.v_buf_size) 1687 { 1688 /* No initialisation needed */ 1689 #ifndef HAVE_NULL_PTR_ALL_ZEROS 1690 d->auth_dg.entropy.data = NULL; 1691 d->auth_dg.nonces = NULL; 1692 #endif 1693 return MHD_SC_OK; 1694 } 1695 nonces_num = s->auth_digest_map_size; 1696 if (0 == nonces_num) 1697 nonces_num = 1000; 1698 d->auth_dg.nonces = (struct mhd_DaemonAuthDigestNonceData *) 1699 mhd_calloc (nonces_num, \ 1700 sizeof(struct mhd_DaemonAuthDigestNonceData)); 1701 if (NULL == d->auth_dg.nonces) 1702 { 1703 mhd_LOG_MSG (d, \ 1704 MHD_SC_DAEMON_MEM_ALLOC_FAILURE, \ 1705 "Failed to allocate memory for Digest Auth array"); 1706 return MHD_SC_DAEMON_MEM_ALLOC_FAILURE; 1707 } 1708 d->auth_dg.cfg.nonces_num = 
nonces_num; 1709 1710 if (! mhd_mutex_init (&(d->auth_dg.nonces_lock))) 1711 { 1712 mhd_LOG_MSG (d, MHD_SC_MUTEX_INIT_FAILURE, \ 1713 "Failed to initialise mutex for the Digest Auth data"); 1714 ret = MHD_SC_MUTEX_INIT_FAILURE; 1715 } 1716 else 1717 { 1718 if (! mhd_atomic_counter_init (&(d->auth_dg.num_gen_nonces), 0)) 1719 { 1720 mhd_LOG_MSG (d, MHD_SC_MUTEX_INIT_FAILURE, \ 1721 "Failed to initialise mutex for the Digest Auth data"); 1722 ret = MHD_SC_MUTEX_INIT_FAILURE; 1723 } 1724 else 1725 { 1726 /* Move ownership of the entropy buffer */ 1727 d->auth_dg.entropy.data = (char *) s->random_entropy.v_buf; 1728 d->auth_dg.entropy.size = s->random_entropy.v_buf_size; 1729 s->random_entropy.v_buf = NULL; 1730 s->random_entropy.v_buf_size = 0; 1731 1732 d->auth_dg.cfg.nonce_tmout = s->auth_digest_nonce_timeout; 1733 if (0 == d->auth_dg.cfg.nonce_tmout) 1734 d->auth_dg.cfg.nonce_tmout = MHD_AUTH_DIGEST_DEF_TIMEOUT; 1735 d->auth_dg.cfg.def_max_nc = s->auth_digest_def_max_nc; 1736 if (0 == d->auth_dg.cfg.def_max_nc) 1737 d->auth_dg.cfg.def_max_nc = MHD_AUTH_DIGEST_DEF_MAX_NC; 1738 1739 return MHD_SC_OK; /* Success exit point */ 1740 } 1741 mhd_mutex_destroy_chk (&(d->auth_dg.nonces_lock)); 1742 } 1743 1744 free (d->auth_dg.nonces); 1745 mhd_assert (MHD_SC_OK != ret); 1746 return ret; /* Failure exit point */ 1747 } 1748 1749 1750 /** 1751 * Deinitialise daemon Digest Auth data 1752 * @param d the daemon object 1753 */ 1754 MHD_FN_PAR_NONNULL_ (1) static void 1755 daemon_deinit_auth_digest (struct MHD_Daemon *restrict d) 1756 { 1757 if (0 == d->auth_dg.entropy.size) 1758 return; /* Digest Auth not used, nothing to deinitialise */ 1759 1760 mhd_assert (NULL != d->auth_dg.entropy.data); 1761 free (d->auth_dg.entropy.data); 1762 mhd_atomic_counter_deinit (&(d->auth_dg.num_gen_nonces)); 1763 mhd_mutex_destroy_chk (&(d->auth_dg.nonces_lock)); 1764 mhd_assert (NULL != d->auth_dg.nonces); 1765 free (d->auth_dg.nonces); 1766 } 1767 1768 1769 #else /* MHD_SUPPORT_AUTH_DIGEST */ 
#define daemon_init_auth_digest(d,s) (MHD_SC_OK)
#define daemon_deinit_auth_digest(d) ((void) 0)
#endif /* MHD_SUPPORT_AUTH_DIGEST */


/**
 * Initialise daemon TLS data
 * @param d the daemon object
 * @param s the user settings
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) \
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
daemon_init_tls (struct MHD_Daemon *restrict d,
                 struct DaemonOptions *restrict s)
{
#ifdef MHD_SUPPORT_HTTPS
  mhd_StatusCodeInt ret;
#endif /* MHD_SUPPORT_HTTPS */

  mhd_assert (! d->dbg.tls_inited);
#ifdef MHD_SUPPORT_HTTPS
  d->tls = NULL;
#endif

  /* Plain HTTP daemon: nothing to initialise */
  if (MHD_TLS_BACKEND_NONE == s->tls)
  {
#ifndef NDEBUG
    d->dbg.tls_inited = true;
#endif
    return MHD_SC_OK;
  }
#ifndef MHD_SUPPORT_HTTPS
  mhd_LOG_MSG (d, \
               MHD_SC_TLS_DISABLED, \
               "HTTPS is not supported by this MHD build");
  return MHD_SC_TLS_DISABLED;
#else  /* MHD_SUPPORT_HTTPS */
  if (1)
  {
    enum mhd_TlsBackendAvailable tls_avail;

    /* Distinguish "not compiled in" from "compiled in but not usable" */
    tls_avail = mhd_tls_is_backend_available (s);
    if (mhd_TLS_BACKEND_AVAIL_NOT_SUPPORTED == tls_avail)
    {
      mhd_LOG_MSG (d, \
                   MHD_SC_TLS_BACKEND_UNSUPPORTED, \
                   "The requested TLS backend is not supported " \
                   "by this MHD build");
      return MHD_SC_TLS_BACKEND_UNSUPPORTED;
    }
    else if (mhd_TLS_BACKEND_AVAIL_NOT_AVAILABLE == tls_avail)
    {
      mhd_LOG_MSG (d, \
                   MHD_SC_TLS_BACKEND_UNAVAILABLE, \
                   "The requested TLS backend is not available");
      return MHD_SC_TLS_BACKEND_UNAVAILABLE;
    }
  }
  ret = mhd_tls_daemon_init (d,
                             mhd_D_HAS_EDGE_TRIGG (d),
                             s,
                             &(d->tls));
  /* d->tls is set if and only if the backend initialised successfully */
  mhd_assert ((MHD_SC_OK == ret) || (NULL == d->tls));
  mhd_assert ((MHD_SC_OK != ret) || (NULL != d->tls));
#ifndef NDEBUG
  d->dbg.tls_inited = (MHD_SC_OK == ret);
#endif
  return (enum MHD_StatusCode) ret;
#endif /* MHD_SUPPORT_HTTPS */
}


/**
 * Deinitialise daemon TLS data
 * @param d the daemon object
 */
MHD_FN_PAR_NONNULL_ (1) static void
daemon_deinit_tls (struct MHD_Daemon *restrict d)
{
  mhd_assert (d->dbg.tls_inited);
#ifdef MHD_SUPPORT_HTTPS
  if (NULL != d->tls)
  {
    mhd_tls_thread_cleanup (d->tls);
    mhd_tls_daemon_deinit (d->tls);
  }
#elif defined(NDEBUG)
  (void) d; /* Mute compiler warning */
#endif
}


/**
 * Initialise large buffer tracking.
 * @param d the daemon object
 * @param s the user settings
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) \
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
daemon_init_large_buf (struct MHD_Daemon *restrict d,
                       struct DaemonOptions *restrict s)
{
  mhd_assert (! mhd_D_HAS_MASTER (d));
  mhd_assert (0 != d->conns.cfg.count_limit);
  mhd_assert (0 != d->conns.cfg.mem_pool_size);

  /* SIZE_MAX in the settings means "not configured": derive a default
   * from the connection limits */
  d->req_cfg.large_buf.space_left = s->large_pool_size;
  if (SIZE_MAX == d->req_cfg.large_buf.space_left)
    d->req_cfg.large_buf.space_left =
      (d->conns.cfg.count_limit * d->conns.cfg.mem_pool_size) / 32; /* Use ~3% of the maximum memory used by connections */

#ifndef NDEBUG
  d->dbg.initial_lbuf_size = d->req_cfg.large_buf.space_left;
#endif

  if (! mhd_mutex_init_short (&(d->req_cfg.large_buf.lock)))
  {
    mhd_LOG_MSG (d, MHD_SC_MUTEX_INIT_FAILURE, \
                 "Failed to initialise mutex for the global large buffer.");
    return MHD_SC_MUTEX_INIT_FAILURE;
  }
  return MHD_SC_OK;
}


/**
 * Deinitialise large buffer tracking.
 * @param d the daemon object
 */
static MHD_FN_PAR_NONNULL_ (1) void
daemon_deinit_large_buf (struct MHD_Daemon *restrict d)
{
  /* All large buffer allocations must be freed / deallocated earlier */
  mhd_assert (d->dbg.initial_lbuf_size == d->req_cfg.large_buf.space_left);
  mhd_mutex_destroy_chk (&(d->req_cfg.large_buf.lock));
}


/**
 * Finish initialisation of events processing
 * @param d the daemon object
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
allocate_events (struct MHD_Daemon *restrict d)
{
#if defined(MHD_SUPPORT_POLL) || defined(MHD_SUPPORT_EPOLL)
  /**
   * The number of elements to be monitored by sockets polling function
   */
  unsigned int num_elements;
  num_elements = 0;
#ifdef MHD_SUPPORT_THREADS
  ++num_elements; /* For the ITC */
#endif
  if (MHD_INVALID_SOCKET != d->net.listen.fd)
    ++num_elements; /* For the listening socket */
  if (!
mhd_D_HAS_THR_PER_CONN (d))
    num_elements += d->conns.cfg.count_limit;
#endif /* MHD_SUPPORT_POLL || MHD_SUPPORT_EPOLL */

  mhd_assert (0 != d->conns.cfg.count_limit);
  mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type));

  mhd_DLINKEDL_INIT_LIST (&(d->events),proc_ready);

  switch (d->events.poll_type)
  {
  case mhd_POLL_TYPE_EXT:
    mhd_assert (NULL != d->events.data.extr.cb_data.cb);
    /* Nothing to do: allocation is not needed */
#ifndef NDEBUG
    d->dbg.events_allocated = true;
#endif
    return MHD_SC_OK; /* Success exit point */
    break;
#ifdef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
    /* The pointers have been set to NULL during pre-initialisations of the events */
    mhd_assert (NULL == d->events.data.select.rfds);
    mhd_assert (NULL == d->events.data.select.wfds);
    mhd_assert (NULL == d->events.data.select.efds);
    /* fd_set sizes are fixed (FD_SETSIZE); the three sets are allocated
     * individually so a partial failure can be unwound cleanly */
    d->events.data.select.rfds = (fd_set *) malloc (sizeof(fd_set));
    if (NULL != d->events.data.select.rfds)
    {
      d->events.data.select.wfds = (fd_set *) malloc (sizeof(fd_set));
      if (NULL != d->events.data.select.wfds)
      {
        d->events.data.select.efds = (fd_set *) malloc (sizeof(fd_set));
        if (NULL != d->events.data.select.efds)
        {
#ifndef NDEBUG
          d->dbg.num_events_elements = FD_SETSIZE;
          d->dbg.events_allocated = true;
#endif
          return MHD_SC_OK; /* Success exit point */
        }

        free (d->events.data.select.wfds);
      }
      free (d->events.data.select.rfds);
    }
    mhd_LOG_MSG (d, MHD_SC_FD_SET_MEMORY_ALLOCATE_FAILURE, \
                 "Failed to allocate memory for fd_sets for the daemon");
    return MHD_SC_FD_SET_MEMORY_ALLOCATE_FAILURE;
    break;
#endif /* MHD_SUPPORT_SELECT */
#ifdef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
    /* The pointers have been set to NULL during pre-initialisations of the events */
    mhd_assert (NULL == d->events.data.poll.fds);
    mhd_assert (NULL == d->events.data.poll.rel);
    /* 'num_elements > count_limit' holds iff the addition of the extra
     * (ITC / listen) slots did not wrap around the unsigned range */
    if ((num_elements > d->conns.cfg.count_limit) /* Check for value overflow */
        || (mhd_D_HAS_THR_PER_CONN (d)))
    {
      d->events.data.poll.fds =
        (struct pollfd *) malloc (sizeof(struct pollfd) * num_elements);
      if (NULL != d->events.data.poll.fds)
      {
        d->events.data.poll.rel =
          (union mhd_SocketRelation *) malloc (sizeof(union mhd_SocketRelation)
                                               * num_elements);
        if (NULL != d->events.data.poll.rel)
        {
#ifndef NDEBUG
          d->dbg.num_events_elements = num_elements;
          d->dbg.events_allocated = true;
#endif
          return MHD_SC_OK; /* Success exit point */
        }
        free (d->events.data.poll.fds);
      }
    }
    mhd_LOG_MSG (d, MHD_SC_POLL_FDS_MEMORY_ALLOCATE_FAILURE, \
                 "Failed to allocate memory for poll fds for the daemon");
    return MHD_SC_POLL_FDS_MEMORY_ALLOCATE_FAILURE;
    break;
#endif /* MHD_SUPPORT_POLL */
#ifdef MHD_SUPPORT_EPOLL
  case mhd_POLL_TYPE_EPOLL:
    mhd_assert (! mhd_D_HAS_THR_PER_CONN (d));
    /* The event FD has been created during pre-initialisations of the events */
    mhd_assert (MHD_INVALID_SOCKET != d->events.data.epoll.e_fd);
    /* The pointer has been set to NULL during pre-initialisations of the events */
    mhd_assert (NULL == d->events.data.epoll.events);
    mhd_assert (0 == d->events.data.epoll.num_elements);
    if ((num_elements > d->conns.cfg.count_limit) /* Check for value overflow */
        || (mhd_D_HAS_THR_PER_CONN (d)))
    {
      /* Cap the events array: larger on 64-bit hosts */
      const unsigned int upper_limit = (sizeof(void*) >= 8) ? 4096u : 1024u;

      mhd_assert (0 < (int) upper_limit);
      mhd_assert (upper_limit == (unsigned int) (size_t) upper_limit);

      /* Trade neglectable performance penalty for memory saving */
      /* Very large amount of new events processed in batches */
      if (num_elements > upper_limit)
        num_elements = upper_limit;

      d->events.data.epoll.events =
        (struct epoll_event *) malloc (sizeof(struct epoll_event)
                                       * num_elements);
      if (NULL != d->events.data.epoll.events)
      {
        d->events.data.epoll.num_elements = num_elements;
#ifndef NDEBUG
        d->dbg.num_events_elements = num_elements;
        d->dbg.events_allocated = true;
#endif
        return MHD_SC_OK; /* Success exit point */
      }
    }
    mhd_LOG_MSG (d, MHD_SC_EPOLL_EVENTS_MEMORY_ALLOCATE_FAILURE, \
                 "Failed to allocate memory for epoll events for the daemon");
    return MHD_SC_EPOLL_EVENTS_MEMORY_ALLOCATE_FAILURE;
    break;
#endif /* MHD_SUPPORT_EPOLL */
#ifndef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
#endif /* ! MHD_SUPPORT_SELECT */
#ifndef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
#endif /* ! MHD_SUPPORT_POLL */
  case mhd_POLL_TYPE_NOT_SET_YET:
  default:
    mhd_UNREACHABLE ();
    break;
  }
  mhd_UNREACHABLE ();
  return MHD_SC_INTERNAL_ERROR;
}


/**
 * Deallocate events data
 * @param d the daemon object
 */
static MHD_FN_PAR_NONNULL_ (1) void
deallocate_events (struct MHD_Daemon *restrict d)
{
  mhd_assert (0 != d->conns.cfg.count_limit);
  mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type));
  if (mhd_POLL_TYPE_NOT_SET_YET == d->events.poll_type)
  {
    mhd_assert (0 && "Wrong workflow");
    mhd_UNREACHABLE ();
    return;
  }
#ifdef MHD_SUPPORT_SELECT
  else if (mhd_POLL_TYPE_SELECT == d->events.poll_type)
  {
    /* Free in reverse order of allocation in allocate_events() */
    mhd_assert (NULL != d->events.data.select.efds);
    mhd_assert (NULL != d->events.data.select.wfds);
    mhd_assert (NULL != d->events.data.select.rfds);
    free (d->events.data.select.efds);
    free (d->events.data.select.wfds);
    free (d->events.data.select.rfds);
  }
#endif /* MHD_SUPPORT_SELECT */
#ifdef MHD_SUPPORT_POLL
  else if (mhd_POLL_TYPE_POLL == d->events.poll_type)
  {
    mhd_assert (NULL != d->events.data.poll.rel);
    mhd_assert (NULL != d->events.data.poll.fds);
    free (d->events.data.poll.rel);
    free (d->events.data.poll.fds);
  }
#endif /* MHD_SUPPORT_POLL */
#ifdef MHD_SUPPORT_EPOLL
  else if (mhd_POLL_TYPE_EPOLL == d->events.poll_type)
  {
    mhd_assert (0 != d->events.data.epoll.num_elements);
    mhd_assert (NULL != d->events.data.epoll.events);
    free (d->events.data.epoll.events);
  }
#endif /* MHD_SUPPORT_EPOLL */
#ifndef NDEBUG
  d->dbg.events_allocated = false;
#endif
  return;
}


/**
 * Initialise daemon's ITC
 * @param d the daemon object
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1)
MHD_FN_MUST_CHECK_RESULT_
enum MHD_StatusCode 2127 init_itc (struct MHD_Daemon *restrict d) 2128 { 2129 mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type)); 2130 mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type)); 2131 #ifdef MHD_SUPPORT_THREADS 2132 // TODO: add and process "thread unsafe" daemon's option 2133 if (! mhd_itc_init (&(d->threading.itc))) 2134 { 2135 #if defined(MHD_ITC_EVENTFD_) 2136 mhd_LOG_MSG ( \ 2137 d, MHD_SC_ITC_INITIALIZATION_FAILED, \ 2138 "Failed to initialise eventFD for inter-thread communication"); 2139 #elif defined(MHD_ITC_PIPE_) 2140 mhd_LOG_MSG ( \ 2141 d, MHD_SC_ITC_INITIALIZATION_FAILED, \ 2142 "Failed to create a pipe for inter-thread communication"); 2143 #elif defined(MHD_ITC_SOCKETPAIR_) 2144 mhd_LOG_MSG ( \ 2145 d, MHD_SC_ITC_INITIALIZATION_FAILED, \ 2146 "Failed to create a socketpair for inter-thread communication"); 2147 #else 2148 #warning Missing expicit handling of the ITC type 2149 mhd_LOG_MSG ( \ 2150 d, MHD_SC_ITC_INITIALIZATION_FAILED, \ 2151 "Failed to initialise inter-thread communication"); 2152 #endif 2153 return MHD_SC_ITC_INITIALIZATION_FAILED; 2154 } 2155 if (! mhd_FD_FITS_DAEMON (d,mhd_itc_r_fd (d->threading.itc))) 2156 { 2157 mhd_LOG_MSG (d, MHD_SC_ITC_FD_OUTSIDE_OF_SET_RANGE, \ 2158 "The inter-thread communication FD value is " \ 2159 "higher than allowed"); 2160 (void) mhd_itc_destroy (d->threading.itc); 2161 mhd_itc_set_invalid (&(d->threading.itc)); 2162 return MHD_SC_ITC_FD_OUTSIDE_OF_SET_RANGE; 2163 } 2164 #else /* ! MHD_SUPPORT_THREADS */ 2165 (void) d; /* Unused */ 2166 #endif /* ! 
MHD_SUPPORT_THREADS */ 2167 return MHD_SC_OK; 2168 } 2169 2170 2171 /** 2172 * Deallocate events data 2173 * @param d the daemon object 2174 */ 2175 static MHD_FN_PAR_NONNULL_ (1) void 2176 deinit_itc (struct MHD_Daemon *restrict d) 2177 { 2178 mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type)); 2179 mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type)); 2180 #ifdef MHD_SUPPORT_THREADS 2181 // TODO: add and process "thread unsafe" daemon's option 2182 mhd_assert (! mhd_ITC_IS_INVALID (d->threading.itc)); 2183 (void) mhd_itc_destroy (d->threading.itc); 2184 #else /* ! MHD_SUPPORT_THREADS */ 2185 (void) d; /* Unused */ 2186 #endif /* ! MHD_SUPPORT_THREADS */ 2187 } 2188 2189 2190 /** 2191 * The final part of events initialisation: pre-add ITC and listening FD to 2192 * the monitored items (if supported by monitoring syscall). 2193 * @param d the daemon object 2194 * @return #MHD_SC_OK on success, 2195 * the error code otherwise 2196 */ 2197 static MHD_FN_PAR_NONNULL_ (1) 2198 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 2199 init_daemon_fds_monitoring (struct MHD_Daemon *restrict d) 2200 { 2201 mhd_assert (d->dbg.net_inited); 2202 mhd_assert (! d->dbg.net_deinited); 2203 mhd_assert (d->dbg.events_allocated); 2204 mhd_assert (! d->dbg.events_fully_inited); 2205 mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type)); 2206 #ifdef MHD_SUPPORT_THREADS 2207 mhd_assert (mhd_ITC_IS_VALID (d->threading.itc)); 2208 #endif 2209 2210 d->events.accept_pending = false; 2211 2212 switch (d->events.poll_type) 2213 { 2214 case mhd_POLL_TYPE_EXT: 2215 mhd_assert (NULL != d->events.data.extr.cb_data.cb); 2216 #ifdef MHD_SUPPORT_THREADS 2217 d->events.data.extr.itc_data.is_active = false; 2218 d->events.data.extr.itc_data.is_broken = false; 2219 #endif /* MHD_SUPPORT_THREADS */ 2220 if (! 
d->events.data.extr.reg_all) 2221 { 2222 bool itc_reg_succeed; 2223 2224 /* Register daemon's FDs now */ 2225 #ifdef MHD_SUPPORT_THREADS 2226 d->events.data.extr.itc_data.app_cntx = 2227 mhd_daemon_extr_event_reg (d, 2228 mhd_itc_r_fd (d->threading.itc), 2229 MHD_FD_STATE_RECV_EXCEPT, 2230 NULL, 2231 (struct MHD_EventUpdateContext *) 2232 mhd_SOCKET_REL_MARKER_ITC); 2233 itc_reg_succeed = (NULL != d->events.data.extr.itc_data.app_cntx); 2234 #else /* ! MHD_SUPPORT_THREADS */ 2235 itc_reg_succeed = true; 2236 #endif /* ! MHD_SUPPORT_THREADS */ 2237 if (itc_reg_succeed) 2238 { 2239 if (MHD_INVALID_SOCKET == d->net.listen.fd) 2240 { 2241 d->events.data.extr.listen_data.app_cntx = NULL; 2242 return MHD_SC_OK; /* Success exit point */ 2243 } 2244 2245 /* Need to register the listen FD */ 2246 d->events.data.extr.listen_data.app_cntx = 2247 mhd_daemon_extr_event_reg (d, 2248 d->net.listen.fd, 2249 MHD_FD_STATE_RECV_EXCEPT, 2250 NULL, 2251 (struct MHD_EventUpdateContext *) 2252 mhd_SOCKET_REL_MARKER_LISTEN); 2253 if (NULL != d->events.data.extr.listen_data.app_cntx) 2254 return MHD_SC_OK; /* Success exit point */ 2255 2256 /* Below is a clean-up path for 'case mhd_POLL_TYPE_EXT:' */ 2257 #ifdef MHD_SUPPORT_THREADS 2258 /* De-register ITC FD */ 2259 (void) mhd_daemon_extr_event_reg (d, 2260 mhd_itc_r_fd (d->threading.itc), 2261 MHD_FD_STATE_NONE, 2262 d->events.data.extr.itc_data.app_cntx, 2263 (struct MHD_EventUpdateContext *) 2264 mhd_SOCKET_REL_MARKER_ITC); 2265 d->events.data.extr.itc_data.app_cntx = NULL; 2266 #endif /* MHD_SUPPORT_THREADS */ 2267 } 2268 2269 mhd_LOG_MSG (d, MHD_SC_EXT_EVENT_REG_DAEMON_FDS_FAILURE, \ 2270 "Failed to register daemon FDs in the application " 2271 "(external events) monitoring."); 2272 return MHD_SC_EXT_EVENT_REG_DAEMON_FDS_FAILURE; 2273 } 2274 else 2275 { 2276 /* Daemons FDs are repeatedly registered every processing cycle */ 2277 #ifdef MHD_SUPPORT_THREADS 2278 d->events.data.extr.itc_data.app_cntx = NULL; 2279 #endif /* 
MHD_SUPPORT_THREADS */ 2280 d->events.data.extr.listen_data.app_cntx = NULL; 2281 return MHD_SC_OK; 2282 } 2283 break; 2284 #ifdef MHD_SUPPORT_SELECT 2285 case mhd_POLL_TYPE_SELECT: 2286 mhd_assert (NULL != d->events.data.select.rfds); 2287 mhd_assert (NULL != d->events.data.select.wfds); 2288 mhd_assert (NULL != d->events.data.select.efds); 2289 /* Nothing to do when using 'select()' */ 2290 return MHD_SC_OK; 2291 break; 2292 #endif /* MHD_SUPPORT_SELECT */ 2293 #ifdef MHD_SUPPORT_POLL 2294 case mhd_POLL_TYPE_POLL: 2295 mhd_assert (NULL != d->events.data.poll.fds); 2296 mhd_assert (NULL != d->events.data.poll.rel); 2297 if (1) 2298 { 2299 unsigned int i; 2300 i = 0; 2301 #ifdef MHD_SUPPORT_THREADS 2302 d->events.data.poll.fds[i].fd = mhd_itc_r_fd (d->threading.itc); 2303 d->events.data.poll.fds[i].events = POLLIN; 2304 d->events.data.poll.rel[i].fd_id = mhd_SOCKET_REL_MARKER_ITC; 2305 ++i; 2306 #endif 2307 if (MHD_INVALID_SOCKET != d->net.listen.fd) 2308 { 2309 d->events.data.poll.fds[i].fd = d->net.listen.fd; 2310 d->events.data.poll.fds[i].events = POLLIN; 2311 d->events.data.poll.rel[i].fd_id = mhd_SOCKET_REL_MARKER_LISTEN; 2312 } 2313 } 2314 return MHD_SC_OK; 2315 break; 2316 #endif /* MHD_SUPPORT_POLL */ 2317 #ifdef MHD_SUPPORT_EPOLL 2318 case mhd_POLL_TYPE_EPOLL: 2319 mhd_assert (MHD_INVALID_SOCKET != d->events.data.epoll.e_fd); 2320 mhd_assert (NULL != d->events.data.epoll.events); 2321 mhd_assert (0 < d->events.data.epoll.num_elements); 2322 if (1) 2323 { 2324 struct epoll_event reg_event; 2325 #ifdef MHD_SUPPORT_THREADS 2326 reg_event.events = EPOLLIN | EPOLLET; 2327 reg_event.data.u64 = (uint64_t) mhd_SOCKET_REL_MARKER_ITC; /* uint64_t is used in the epoll header */ 2328 if (0 != epoll_ctl (d->events.data.epoll.e_fd, EPOLL_CTL_ADD, 2329 mhd_itc_r_fd (d->threading.itc), ®_event)) 2330 { 2331 mhd_LOG_MSG (d, MHD_SC_EPOLL_ADD_DAEMON_FDS_FAILURE, \ 2332 "Failed to add ITC FD to the epoll monitoring."); 2333 return MHD_SC_EPOLL_ADD_DAEMON_FDS_FAILURE; 2334 } 
2335 mhd_dbg_print_fd_mon_req ("ITC", \ 2336 mhd_itc_r_fd (d->threading.itc), \ 2337 true, \ 2338 false, \ 2339 false); 2340 #endif 2341 if (MHD_INVALID_SOCKET != d->net.listen.fd) 2342 { 2343 reg_event.events = EPOLLIN; 2344 reg_event.data.u64 = (uint64_t) mhd_SOCKET_REL_MARKER_LISTEN; /* uint64_t is used in the epoll header */ 2345 if (0 != epoll_ctl (d->events.data.epoll.e_fd, EPOLL_CTL_ADD, 2346 d->net.listen.fd, ®_event)) 2347 { 2348 mhd_LOG_MSG (d, MHD_SC_EPOLL_ADD_DAEMON_FDS_FAILURE, \ 2349 "Failed to add listening FD to the epoll monitoring."); 2350 return MHD_SC_EPOLL_ADD_DAEMON_FDS_FAILURE; 2351 } 2352 mhd_dbg_print_fd_mon_req ("lstn", \ 2353 d->net.listen.fd, \ 2354 true, \ 2355 false, \ 2356 false); 2357 } 2358 } 2359 return MHD_SC_OK; 2360 break; 2361 #endif /* MHD_SUPPORT_EPOLL */ 2362 #ifndef MHD_SUPPORT_SELECT 2363 case mhd_POLL_TYPE_SELECT: 2364 #endif /* ! MHD_SUPPORT_SELECT */ 2365 #ifndef MHD_SUPPORT_POLL 2366 case mhd_POLL_TYPE_POLL: 2367 #endif /* ! MHD_SUPPORT_POLL */ 2368 case mhd_POLL_TYPE_NOT_SET_YET: 2369 default: 2370 mhd_UNREACHABLE (); 2371 break; 2372 } 2373 mhd_UNREACHABLE (); 2374 return MHD_SC_INTERNAL_ERROR; 2375 } 2376 2377 2378 /** 2379 * The initial part of events de-initialisation: remove ITC and listening FD 2380 * from the monitored items (if supported by monitoring syscall). 
2381 * @param d the daemon object 2382 */ 2383 static MHD_FN_PAR_NONNULL_ (1) void 2384 deinit_daemon_fds_monitoring (struct MHD_Daemon *restrict d) 2385 { 2386 mhd_assert (d->dbg.events_fully_inited); 2387 2388 switch (d->events.poll_type) 2389 { 2390 case mhd_POLL_TYPE_EXT: 2391 if (NULL != d->events.data.extr.listen_data.app_cntx) 2392 (void) mhd_daemon_extr_event_reg ( 2393 d, 2394 d->net.listen.fd, 2395 MHD_FD_STATE_NONE, 2396 d->events.data.extr.listen_data.app_cntx, 2397 (struct MHD_EventUpdateContext *) mhd_SOCKET_REL_MARKER_LISTEN); 2398 #ifdef MHD_SUPPORT_THREADS 2399 if (NULL != d->events.data.extr.itc_data.app_cntx) 2400 (void) mhd_daemon_extr_event_reg (d, 2401 mhd_itc_r_fd (d->threading.itc), 2402 MHD_FD_STATE_NONE, 2403 d->events.data.extr.itc_data.app_cntx, 2404 (struct MHD_EventUpdateContext *) 2405 mhd_SOCKET_REL_MARKER_ITC); 2406 #endif /* MHD_SUPPORT_THREADS */ 2407 return; 2408 #ifdef MHD_SUPPORT_SELECT 2409 case mhd_POLL_TYPE_SELECT: 2410 /* Nothing to do when using 'select()' */ 2411 return; 2412 break; 2413 #endif /* MHD_SUPPORT_SELECT */ 2414 #ifdef MHD_SUPPORT_POLL 2415 case mhd_POLL_TYPE_POLL: 2416 /* Nothing to do when using 'poll()' */ 2417 return; 2418 break; 2419 #endif /* MHD_SUPPORT_POLL */ 2420 #ifdef MHD_SUPPORT_EPOLL 2421 case mhd_POLL_TYPE_EPOLL: 2422 /* Nothing to do when using epoll. 2423 Monitoring stopped by closing epoll FD. */ 2424 return; 2425 break; 2426 #endif /* MHD_SUPPORT_EPOLL */ 2427 #ifndef MHD_SUPPORT_SELECT 2428 case mhd_POLL_TYPE_SELECT: 2429 #endif /* ! MHD_SUPPORT_SELECT */ 2430 #ifndef MHD_SUPPORT_POLL 2431 case mhd_POLL_TYPE_POLL: 2432 #endif /* ! MHD_SUPPORT_POLL */ 2433 case mhd_POLL_TYPE_NOT_SET_YET: 2434 default: 2435 mhd_UNREACHABLE (); 2436 break; 2437 } 2438 mhd_UNREACHABLE (); 2439 } 2440 2441 2442 /** 2443 * Initialise daemon connections' data. 
2444 * @param d the daemon object 2445 * @param s the user settings 2446 * @return #MHD_SC_OK on success, 2447 * the error code otherwise 2448 */ 2449 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) 2450 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 2451 init_individual_conns (struct MHD_Daemon *restrict d, 2452 struct DaemonOptions *restrict s) 2453 { 2454 mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 2455 mhd_assert (0 != d->conns.cfg.count_limit); 2456 2457 mhd_DLINKEDL_INIT_LIST (&(d->conns),all_conn); 2458 mhd_DLINKEDL_INIT_LIST (&(d->conns),def_timeout); 2459 mhd_DLINKEDL_INIT_LIST (&(d->conns),cust_timeout); 2460 d->conns.count = 0; 2461 d->conns.block_new = false; 2462 2463 d->conns.cfg.mem_pool_size = s->conn_memory_limit; 2464 if (0 == d->conns.cfg.mem_pool_size) 2465 d->conns.cfg.mem_pool_size = 32 * 1024; 2466 else if (256 > d->conns.cfg.mem_pool_size) 2467 d->conns.cfg.mem_pool_size = 256; 2468 2469 switch (s->conn_buff_zeroing) 2470 { 2471 case MHD_CONN_BUFFER_ZEROING_DISABLED: 2472 d->conns.cfg.mem_pool_zeroing = MHD_MEMPOOL_ZEROING_NEVER; 2473 break; 2474 case MHD_CONN_BUFFER_ZEROING_BASIC: 2475 d->conns.cfg.mem_pool_zeroing = MHD_MEMPOOL_ZEROING_ON_RESET; 2476 break; 2477 case MHD_CONN_BUFFER_ZEROING_HEAVY: 2478 default: 2479 d->conns.cfg.mem_pool_zeroing = MHD_MEMPOOL_ZEROING_ALWAYS; 2480 break; 2481 } 2482 2483 #ifdef MHD_SUPPORT_UPGRADE 2484 mhd_DLINKEDL_INIT_LIST (&(d->conns.upgr),upgr_cleanup); 2485 if (! mhd_mutex_init (&(d->conns.upgr.ucu_lock))) 2486 { 2487 mhd_LOG_MSG (d, MHD_SC_MUTEX_INIT_FAILURE, \ 2488 "Failed to initialise mutex for the upgraded " \ 2489 "connection list."); 2490 return MHD_SC_MUTEX_INIT_FAILURE; 2491 } 2492 #endif /* MHD_SUPPORT_UPGRADE */ 2493 2494 #ifndef NDEBUG 2495 d->dbg.connections_inited = true; 2496 #endif 2497 return MHD_SC_OK; 2498 } 2499 2500 2501 /** 2502 * Deinitialise daemon connections' data. 
2503 * @param d the daemon object 2504 */ 2505 static MHD_FN_PAR_NONNULL_ (1) void 2506 deinit_individual_conns (struct MHD_Daemon *restrict d) 2507 { 2508 #ifdef MHD_SUPPORT_UPGRADE 2509 mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->conns.upgr),upgr_cleanup)); 2510 mhd_assert (NULL == mhd_DLINKEDL_GET_LAST (&(d->conns.upgr),upgr_cleanup)); 2511 2512 mhd_mutex_destroy_chk (&(d->conns.upgr.ucu_lock)); 2513 #endif /* MHD_SUPPORT_UPGRADE */ 2514 2515 mhd_assert (0 != d->conns.cfg.mem_pool_size); 2516 mhd_assert (0 == d->conns.count); 2517 mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->conns),cust_timeout)); 2518 mhd_assert (NULL == mhd_DLINKEDL_GET_LAST (&(d->conns),cust_timeout)); 2519 mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->conns),def_timeout)); 2520 mhd_assert (NULL == mhd_DLINKEDL_GET_LAST (&(d->conns),def_timeout)); 2521 mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn)); 2522 mhd_assert (NULL == mhd_DLINKEDL_GET_LAST (&(d->conns),all_conn)); 2523 } 2524 2525 2526 /** 2527 * Prepare daemon-local (worker daemon for thread pool mode) threading data 2528 * and finish events initialising. 2529 * To be used only with non-master daemons. 2530 * Do not start the thread even if configured for the internal threads. 2531 * @param d the daemon object 2532 * @return #MHD_SC_OK on success, 2533 * the error code otherwise 2534 */ 2535 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) 2536 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 2537 init_individual_thread_data_events_conns (struct MHD_Daemon *restrict d, 2538 struct DaemonOptions *restrict s) 2539 { 2540 enum MHD_StatusCode res; 2541 mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type)); 2542 mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type)); 2543 mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 2544 mhd_assert (! 
d->dbg.connections_inited); 2545 2546 res = allocate_events (d); 2547 if (MHD_SC_OK != res) 2548 return res; 2549 2550 res = init_itc (d); 2551 if (MHD_SC_OK == res) 2552 { 2553 res = init_daemon_fds_monitoring (d); 2554 2555 if (MHD_SC_OK == res) 2556 { 2557 #ifndef NDEBUG 2558 d->dbg.events_fully_inited = true; 2559 #endif 2560 #ifdef MHD_SUPPORT_THREADS 2561 mhd_thread_handle_ID_set_invalid (&(d->threading.tid)); 2562 d->threading.stop_requested = false; 2563 #endif /* MHD_SUPPORT_THREADS */ 2564 #ifndef NDEBUG 2565 d->dbg.threading_inited = true; 2566 #endif 2567 2568 res = init_individual_conns (d, s); 2569 if (MHD_SC_OK == res) 2570 return MHD_SC_OK; 2571 2572 /* Below is a clean-up path */ 2573 2574 deinit_daemon_fds_monitoring (d); 2575 } 2576 deinit_itc (d); 2577 } 2578 deallocate_events (d); 2579 mhd_assert (MHD_SC_OK != res); 2580 return res; 2581 } 2582 2583 2584 /** 2585 * Deinit daemon-local (worker daemon for thread pool mode) threading data 2586 * and deallocate events. 2587 * To be used only with non-master daemons. 2588 * The internal thread (is any) must be stopped already. 2589 * @param d the daemon object 2590 */ 2591 static MHD_FN_PAR_NONNULL_ (1) void 2592 deinit_individual_thread_data_events_conns (struct MHD_Daemon *restrict d) 2593 { 2594 deinit_individual_conns (d); 2595 deinit_daemon_fds_monitoring (d); 2596 deinit_itc (d); 2597 deallocate_events (d); 2598 mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn)); 2599 mhd_assert (NULL == mhd_DLINKEDL_GET_FIRST (&(d->events),proc_ready)); 2600 #ifndef NDEBUG 2601 d->dbg.events_fully_inited = false; 2602 #endif 2603 } 2604 2605 2606 /** 2607 * Initialise the data specific only for the worker daemon. 
2608 * @param d the daemon object 2609 * @return #MHD_SC_OK on success, 2610 * the error code otherwise 2611 */ 2612 static MHD_FN_PAR_NONNULL_ (1) 2613 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 2614 init_worker_only_data (struct MHD_Daemon *restrict d) 2615 { 2616 enum MHD_StatusCode res; 2617 struct mhd_DaemonExtAddedConnectionsWorker *worker_only_data = 2618 &(d->events.act_req.ext_added.worker); 2619 2620 mhd_assert (! mhd_D_HAS_WORKERS (d)); 2621 mhd_assert (d->dbg.net_inited); 2622 mhd_assert (! d->dbg.worker_only_inited); 2623 mhd_assert (mhd_D_HAS_MASTER (d) || ! d->dbg.master_only_inited); 2624 mhd_assert (! mhd_D_HAS_MASTER (d) || d->dbg.master_only_inited); /* Copied from master daemon */ 2625 2626 #ifndef NDEBUG 2627 /* "master"-only data will be overwritten here without de-initialising */ 2628 d->dbg.master_only_inited = false; 2629 #endif /* ! NDEBUG */ 2630 2631 mhd_DLINKEDL_INIT_LIST (worker_only_data, queue); 2632 2633 if (! mhd_mutex_init (&(worker_only_data->q_lock))) 2634 { 2635 mhd_LOG_MSG (d, MHD_SC_MUTEX_INIT_FAILURE, \ 2636 "Failed to initialise mutex for externally added " 2637 "connections"); 2638 res = MHD_SC_MUTEX_INIT_FAILURE; 2639 } 2640 else 2641 res = MHD_SC_OK; 2642 2643 #ifndef NDEBUG 2644 if (MHD_SC_OK == res) 2645 d->dbg.worker_only_inited = true; 2646 #endif /* ! NDEBUG */ 2647 2648 return res; 2649 } 2650 2651 2652 /** 2653 * De-initialise the data specific only for the worker daemon. 2654 * @param d the daemon object 2655 */ 2656 static MHD_FN_PAR_NONNULL_ (1) void 2657 deinit_worker_only_data (struct MHD_Daemon *restrict d) 2658 { 2659 struct mhd_DaemonExtAddedConnectionsWorker *worker_only_data = 2660 &(d->events.act_req.ext_added.worker); 2661 struct mhd_DaemonExtAddedConn *q_e; 2662 2663 mhd_assert (! mhd_D_HAS_WORKERS (d)); 2664 mhd_assert (d->dbg.net_inited); 2665 mhd_assert (d->dbg.worker_only_inited); 2666 mhd_assert (! 
d->dbg.master_only_inited); 2667 2668 /* Clean-up all unprocessed entries */ 2669 2670 for (q_e = mhd_DLINKEDL_GET_FIRST (worker_only_data, queue); 2671 NULL != q_e; 2672 q_e = mhd_DLINKEDL_GET_FIRST (worker_only_data, queue)) 2673 { 2674 mhd_ASSUME (NULL == mhd_DLINKEDL_GET_PREV (q_e, queue)); 2675 mhd_DLINKEDL_DEL (worker_only_data, q_e, queue); 2676 mhd_socket_close (q_e->skt); 2677 2678 if (NULL != q_e->addr) 2679 free (q_e->addr); 2680 2681 free (q_e); 2682 } 2683 2684 mhd_mutex_destroy_chk (&(worker_only_data->q_lock)); 2685 2686 #ifndef NDEBUG 2687 d->dbg.worker_only_inited = false; 2688 #endif /* ! NDEBUG */ 2689 } 2690 2691 2692 /** 2693 * Initialise worker daemon (the only daemon or member of the worker pool) 2694 * worker-specific daemon data, individual thread data and finish events 2695 * initialising. 2696 * To be used only with non-master daemons. 2697 * Do not start the thread even if configured for the internal threads. 2698 * @param d the daemon object 2699 * @return #MHD_SC_OK on success, 2700 * the error code otherwise 2701 */ 2702 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) 2703 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 2704 init_worker (struct MHD_Daemon *restrict d, 2705 struct DaemonOptions *restrict s) 2706 { 2707 enum MHD_StatusCode res; 2708 mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 2709 2710 res = init_worker_only_data (d); 2711 2712 if (MHD_SC_OK == res) 2713 { 2714 res = init_individual_thread_data_events_conns (d, 2715 s); 2716 2717 if (MHD_SC_OK == res) 2718 return MHD_SC_OK; 2719 2720 /* Below is a clean-up path */ 2721 2722 deinit_worker_only_data (d); 2723 } 2724 2725 mhd_assert (MHD_SC_OK != res); 2726 2727 return res; 2728 } 2729 2730 2731 /** 2732 * De-initialise worker daemon (the only daemon or member of the worker pool) 2733 * worker-specific daemon data, individual thread data and finish events 2734 * initialising. 2735 * To be used only with non-master daemons. 
2736 * The internal thread (is any) must be stopped already. 2737 * @param d the daemon object 2738 */ 2739 static MHD_FN_PAR_NONNULL_ (1) void 2740 deinit_worker (struct MHD_Daemon *restrict d) 2741 { 2742 mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 2743 2744 deinit_individual_thread_data_events_conns (d); 2745 2746 deinit_worker_only_data (d); 2747 } 2748 2749 2750 /** 2751 * Set the maximum number of handled connections for the daemon. 2752 * Works only for global limit, does not work for the worker daemon. 2753 * @param d the daemon object 2754 * @param s the user settings 2755 * @return #MHD_SC_OK on success, 2756 * the error code otherwise 2757 */ 2758 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) 2759 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 2760 set_connections_total_limits (struct MHD_Daemon *restrict d, 2761 struct DaemonOptions *restrict s) 2762 { 2763 unsigned int limit_by_conf; 2764 unsigned int limit_by_num; 2765 unsigned int limit_by_select; 2766 unsigned int resulting_limit; 2767 bool error_by_fd_setsize; 2768 unsigned int num_worker_daemons; 2769 2770 mhd_assert (! 
mhd_D_HAS_MASTER (d)); 2771 mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type)); 2772 2773 num_worker_daemons = 1; 2774 #ifdef MHD_SUPPORT_THREADS 2775 if (mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL == d->wmode_int) 2776 { 2777 mhd_assert (MHD_WM_WORKER_THREADS == s->work_mode.mode); 2778 if ((0 != s->global_connection_limit) && 2779 (0 != s->work_mode.params.num_worker_threads) && 2780 (s->global_connection_limit < s->work_mode.params.num_worker_threads)) 2781 { 2782 mhd_LOG_MSG ( \ 2783 d, MHD_SC_CONFIGURATION_CONN_LIMIT_TOO_SMALL, \ 2784 "The limit specified by MHD_D_O_GLOBAL_CONNECTION_LIMIT is smaller " \ 2785 "then the number of worker threads."); 2786 return MHD_SC_CONFIGURATION_CONN_LIMIT_TOO_SMALL; 2787 } 2788 } 2789 if (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)) 2790 num_worker_daemons = s->work_mode.params.num_worker_threads; 2791 #endif /* MHD_SUPPORT_THREADS */ 2792 2793 limit_by_conf = s->global_connection_limit; 2794 limit_by_num = UINT_MAX; 2795 limit_by_select = UINT_MAX; 2796 mhd_assert (UINT_MAX == limit_by_num); /* Mute analyser warning */ 2797 2798 error_by_fd_setsize = false; 2799 #ifdef MHD_SOCKETS_KIND_POSIX 2800 if (1) 2801 { 2802 limit_by_num = (unsigned int) d->net.cfg.max_fd_num; 2803 if (0 != limit_by_num) 2804 { 2805 /* Find the upper limit. 
2806 The real limit is lower, as any other process FDs will use the slots 2807 in the allowed numbers range */ 2808 limit_by_num -= 3; /* The numbers zero, one and two are used typically */ 2809 #ifdef MHD_SUPPORT_THREADS 2810 limit_by_num -= mhd_ITC_NUM_FDS * num_worker_daemons; 2811 #endif /* MHD_SUPPORT_THREADS */ 2812 if (MHD_INVALID_SOCKET != d->net.listen.fd) 2813 --limit_by_num; /* One FD is used for the listening socket */ 2814 if ((num_worker_daemons > limit_by_num) || 2815 (limit_by_num > (unsigned int) d->net.cfg.max_fd_num) /* Underflow */) 2816 { 2817 if (d->net.cfg.max_fd_num == s->fd_number_limit) 2818 { 2819 mhd_LOG_MSG ( \ 2820 d, MHD_SC_MAX_FD_NUMBER_LIMIT_TOO_STRICT, \ 2821 "The limit specified by MHD_D_O_FD_NUMBER_LIMIT is too strict " \ 2822 "for this daemon settings."); 2823 return MHD_SC_MAX_FD_NUMBER_LIMIT_TOO_STRICT; 2824 } 2825 else 2826 { 2827 mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type); 2828 error_by_fd_setsize = true; 2829 } 2830 } 2831 } 2832 else 2833 limit_by_num = (unsigned int) INT_MAX; 2834 } 2835 #elif defined(MHD_SOCKETS_KIND_WINSOCK) 2836 if (1) 2837 { 2838 #ifdef MHD_SUPPORT_SELECT 2839 if ((mhd_DAEMON_TYPE_SINGLE == d->threading.d_type) && 2840 (mhd_POLL_TYPE_SELECT == d->events.poll_type)) 2841 { 2842 /* W32 limits the total number (count) of sockets used for select() */ 2843 unsigned int limit_per_worker; 2844 2845 limit_per_worker = FD_SETSIZE; 2846 if (MHD_INVALID_SOCKET != d->net.listen.fd) 2847 --limit_per_worker; /* The slot for the listening socket */ 2848 #ifdef MHD_SUPPORT_THREADS 2849 --limit_per_worker; /* The slot for the ITC */ 2850 #endif /* MHD_SUPPORT_THREADS */ 2851 if ((0 == limit_per_worker) || (limit_per_worker > FD_SETSIZE)) 2852 error_by_fd_setsize = true; 2853 else 2854 { 2855 limit_by_select = limit_per_worker * num_worker_daemons; 2856 if (limit_by_select / limit_per_worker != num_worker_daemons) 2857 limit_by_select = UINT_MAX; 2858 } 2859 } 2860 #endif /* MHD_SUPPORT_SELECT */ 2861 
(void) 0; /* Mute compiler warning */ 2862 } 2863 #endif /* MHD_SOCKETS_KIND_POSIX */ 2864 if (error_by_fd_setsize) 2865 { 2866 mhd_LOG_MSG ( \ 2867 d, MHD_SC_SYS_FD_SETSIZE_TOO_STRICT, \ 2868 "The FD_SETSIZE is too strict to run daemon with the polling " \ 2869 "by select() and with the specified number of workers."); 2870 return MHD_SC_SYS_FD_SETSIZE_TOO_STRICT; 2871 } 2872 2873 if (0 != limit_by_conf) 2874 { 2875 /* The number has bet set explicitly */ 2876 resulting_limit = limit_by_conf; 2877 } 2878 else 2879 { 2880 /* No user configuration provided */ 2881 unsigned int suggested_limit; 2882 #ifndef MHD_SOCKETS_KIND_WINSOCK 2883 #define TYPICAL_NOFILES_LIMIT (1024) /* The usual limit for the number of open FDs */ 2884 suggested_limit = TYPICAL_NOFILES_LIMIT; 2885 suggested_limit -= 3; /* The numbers zero, one and two are used typically */ 2886 #ifdef MHD_SUPPORT_THREADS 2887 suggested_limit -= mhd_ITC_NUM_FDS * num_worker_daemons; 2888 #endif /* MHD_SUPPORT_THREADS */ 2889 if (MHD_INVALID_SOCKET != d->net.listen.fd) 2890 --suggested_limit; /* One FD is used for the listening socket */ 2891 if (suggested_limit > TYPICAL_NOFILES_LIMIT) 2892 suggested_limit = 0; /* Overflow */ 2893 #else /* MHD_SOCKETS_KIND_WINSOCK */ 2894 #ifdef _WIN64 2895 suggested_limit = 2048; 2896 #else 2897 suggested_limit = 1024; 2898 #endif 2899 #endif /* MHD_SOCKETS_KIND_WINSOCK */ 2900 if (suggested_limit < num_worker_daemons) 2901 { 2902 /* Use at least one connection for every worker daemon and 2903 let the system to restrict the new connections if they are above 2904 the system limits. 
*/ 2905 suggested_limit = num_worker_daemons; 2906 } 2907 resulting_limit = suggested_limit; 2908 } 2909 if (resulting_limit > limit_by_num) 2910 resulting_limit = limit_by_num; 2911 2912 if (resulting_limit > limit_by_select) 2913 resulting_limit = limit_by_select; 2914 2915 mhd_assert (resulting_limit >= num_worker_daemons); 2916 d->conns.cfg.count_limit = resulting_limit; 2917 if (d->conns.cfg.per_ip_limit <= d->conns.cfg.count_limit) 2918 d->conns.cfg.per_ip_limit = 0; /* Already enforced by global limit */ 2919 2920 return MHD_SC_OK; 2921 } 2922 2923 2924 /** 2925 * Set correct daemon threading type. 2926 * Set the number of workers for thread pool type. 2927 * @param d the daemon object 2928 * @return #MHD_SC_OK on success, 2929 * the error code otherwise 2930 */ 2931 MHD_FN_PAR_NONNULL_ (1) \ 2932 MHD_FN_MUST_CHECK_RESULT_ static inline enum MHD_StatusCode 2933 set_d_threading_type (struct MHD_Daemon *restrict d) 2934 { 2935 switch (d->wmode_int) 2936 { 2937 case mhd_WM_INT_EXTERNAL_EVENTS_EDGE: 2938 case mhd_WM_INT_EXTERNAL_EVENTS_LEVEL: 2939 mhd_assert (! mhd_WM_INT_HAS_THREADS (d->wmode_int)); 2940 mhd_assert (mhd_POLL_TYPE_EXT == d->events.poll_type); 2941 mhd_assert (NULL != d->events.data.extr.cb_data.cb); 2942 #ifdef MHD_SUPPORT_THREADS 2943 d->threading.d_type = mhd_DAEMON_TYPE_SINGLE; 2944 #endif /* MHD_SUPPORT_THREADS */ 2945 return MHD_SC_OK; 2946 case mhd_WM_INT_INTERNAL_EVENTS_NO_THREADS: 2947 mhd_assert (! 
mhd_WM_INT_HAS_THREADS (d->wmode_int)); 2948 mhd_assert (mhd_POLL_TYPE_EXT != d->events.poll_type); 2949 #ifdef MHD_SUPPORT_THREADS 2950 d->threading.d_type = mhd_DAEMON_TYPE_SINGLE; 2951 #endif /* MHD_SUPPORT_THREADS */ 2952 return MHD_SC_OK; 2953 #ifdef MHD_SUPPORT_THREADS 2954 case mhd_WM_INT_INTERNAL_EVENTS_ONE_THREAD: 2955 mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int)); 2956 mhd_assert (mhd_POLL_TYPE_EXT != d->events.poll_type); 2957 d->threading.d_type = mhd_DAEMON_TYPE_SINGLE; 2958 return MHD_SC_OK; 2959 case mhd_WM_INT_INTERNAL_EVENTS_THREAD_PER_CONNECTION: 2960 mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int)); 2961 mhd_assert (mhd_POLL_TYPE_EXT != d->events.poll_type); 2962 mhd_assert (! mhd_POLL_TYPE_INT_IS_EPOLL (d->events.poll_type)); 2963 d->threading.d_type = mhd_DAEMON_TYPE_LISTEN_ONLY; 2964 return MHD_SC_OK; 2965 case mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL: 2966 mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int)); 2967 mhd_assert (mhd_POLL_TYPE_EXT != d->events.poll_type); 2968 d->threading.d_type = mhd_DAEMON_TYPE_MASTER_CONTROL_ONLY; 2969 return MHD_SC_OK; 2970 #endif /* MHD_SUPPORT_THREADS */ 2971 default: 2972 break; 2973 } 2974 mhd_UNREACHABLE (); 2975 return MHD_SC_INTERNAL_ERROR; 2976 } 2977 2978 2979 #ifdef MHD_SUPPORT_THREADS 2980 2981 /** 2982 * De-initialise workers pool, including workers daemons. 2983 * The threads must be not running. 
2984 * @param d the daemon object 2985 * @param num_workers the number of workers to deinit 2986 */ 2987 static MHD_FN_PAR_NONNULL_ (1) void 2988 deinit_workers_pool (struct MHD_Daemon *restrict d, 2989 unsigned int num_workers) 2990 { 2991 unsigned int i; 2992 mhd_assert (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 2993 mhd_assert (NULL != d->threading.hier.pool.workers); 2994 mhd_assert ((2 <= d->threading.hier.pool.num) || \ 2995 (mhd_DAEMON_STATE_STARTING == d->state)); 2996 mhd_assert ((num_workers == d->threading.hier.pool.num) || \ 2997 (mhd_DAEMON_STATE_STARTING == d->state)); 2998 mhd_assert ((mhd_DAEMON_STATE_STOPPING == d->state) || \ 2999 (mhd_DAEMON_STATE_STARTING == d->state)); 3000 3001 /* Deinitialise in reverse order */ 3002 for (i = num_workers - 1; num_workers > i; --i) 3003 { /* Note: loop exits after underflow of 'i' */ 3004 struct MHD_Daemon *const worker = d->threading.hier.pool.workers + i; 3005 deinit_worker (worker); 3006 #ifdef MHD_SUPPORT_EPOLL 3007 if (mhd_POLL_TYPE_EPOLL == worker->events.poll_type) 3008 deinit_epoll (worker); 3009 #endif /* MHD_SUPPORT_EPOLL */ 3010 } 3011 free (d->threading.hier.pool.workers); 3012 #ifndef NDEBUG 3013 d->dbg.thread_pool_inited = false; 3014 #endif 3015 } 3016 3017 3018 /** 3019 * Nullify worker daemon member that should be set only in master daemon 3020 * @param d the daemon object 3021 */ 3022 static MHD_FN_PAR_NONNULL_ (1) void 3023 reset_master_only_areas (struct MHD_Daemon *restrict d) 3024 { 3025 #ifdef MHD_SUPPORT_AUTH_DIGEST 3026 memset (&(d->auth_dg.nonces_lock), 3027 0x7F, 3028 sizeof(d->auth_dg.nonces_lock)); 3029 #endif 3030 /* Not needed. It is initialised later */ 3031 /* memset (&(d->req_cfg.large_buf), 0, sizeof(d->req_cfg.large_buf)); */ 3032 (void) d; 3033 } 3034 3035 3036 /** 3037 * Initialise workers pool, including workers daemons. 3038 * Do not start the threads. 
3039 * @param d the daemon object 3040 * @param s the user settings 3041 * @return #MHD_SC_OK on success, 3042 * the error code otherwise 3043 */ 3044 static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2) 3045 MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode 3046 init_workers_pool (struct MHD_Daemon *restrict d, 3047 struct DaemonOptions *restrict s) 3048 { 3049 enum MHD_StatusCode res; 3050 size_t workers_pool_size; 3051 unsigned int conn_per_daemon; 3052 unsigned int num_workers; 3053 unsigned int conn_remainder; 3054 unsigned int i; 3055 3056 mhd_assert (d->dbg.net_inited); 3057 mhd_assert (! d->dbg.net_deinited); 3058 mhd_assert (mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL == d->wmode_int); 3059 mhd_assert (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type)); 3060 mhd_assert (mhd_POLL_TYPE_NOT_SET_YET < d->events.poll_type); 3061 mhd_assert (1 < s->work_mode.params.num_worker_threads); 3062 mhd_assert (0 != d->conns.cfg.count_limit); 3063 mhd_assert (s->work_mode.params.num_worker_threads <= \ 3064 d->conns.cfg.count_limit); 3065 mhd_assert (! 
d->dbg.thread_pool_inited); 3066 3067 num_workers = s->work_mode.params.num_worker_threads; 3068 workers_pool_size = 3069 (sizeof(struct MHD_Daemon) * num_workers); 3070 if (workers_pool_size / num_workers != sizeof(struct MHD_Daemon)) 3071 { /* Overflow */ 3072 mhd_LOG_MSG ( \ 3073 d, MHD_SC_THREAD_POOL_MEM_ALLOC_FAILURE, \ 3074 "The size of the thread pool is too large."); 3075 return MHD_SC_THREAD_POOL_MEM_ALLOC_FAILURE; 3076 } 3077 3078 #ifndef NDEBUG 3079 mhd_itc_set_invalid (&(d->threading.itc)); 3080 mhd_thread_handle_ID_set_invalid (&(d->threading.tid)); 3081 #endif 3082 3083 d->threading.hier.pool.workers = (struct MHD_Daemon *) 3084 malloc (workers_pool_size); 3085 if (NULL == d->threading.hier.pool.workers) 3086 { 3087 mhd_LOG_MSG ( \ 3088 d, MHD_SC_THREAD_POOL_MEM_ALLOC_FAILURE, \ 3089 "Failed to allocate memory for the thread pool."); 3090 return MHD_SC_THREAD_POOL_MEM_ALLOC_FAILURE; 3091 } 3092 3093 conn_per_daemon = d->conns.cfg.count_limit / num_workers; 3094 conn_remainder = d->conns.cfg.count_limit % num_workers; 3095 res = MHD_SC_OK; 3096 for (i = 0; num_workers > i; ++i) 3097 { 3098 struct MHD_Daemon *restrict const worker = 3099 d->threading.hier.pool.workers + i; 3100 memcpy (worker, d, sizeof(struct MHD_Daemon)); 3101 reset_master_only_areas (worker); 3102 3103 worker->threading.d_type = mhd_DAEMON_TYPE_WORKER; 3104 worker->threading.hier.master = d; 3105 worker->conns.cfg.count_limit = conn_per_daemon; 3106 if (conn_remainder > i) 3107 worker->conns.cfg.count_limit++; /* Distribute the reminder */ 3108 #ifdef MHD_SUPPORT_EPOLL 3109 if (mhd_POLL_TYPE_EPOLL == worker->events.poll_type) 3110 { 3111 if (0 == i) 3112 { 3113 mhd_assert (0 <= d->events.data.epoll.e_fd); 3114 /* Move epoll control FD from the master daemon to the first worker */ 3115 /* The FD has been copied by memcpy(). Clean-up the master daemon. 
*/
        d->events.data.epoll.e_fd = MHD_INVALID_SOCKET;
      }
      else
        res = init_epoll (worker,
                          true);
    }
#endif /* MHD_SUPPORT_EPOLL */
    if (MHD_SC_OK == res)
    {
      res = init_worker (worker,
                         s);
      if (MHD_SC_OK == res)
        continue; /* Process the next worker */

      /* Below is a clean-up of the current slot */

#ifdef MHD_SUPPORT_EPOLL
      if (mhd_POLL_TYPE_EPOLL == worker->events.poll_type)
        deinit_epoll (worker);
#endif /* MHD_SUPPORT_EPOLL */
    }
    break;
  }
  if (num_workers == i)
  {
    /* All workers have been initialised successfully */
    mhd_assert (MHD_SC_OK == res);
#ifndef NDEBUG
    d->dbg.thread_pool_inited = true;
    d->dbg.threading_inited = true;
#endif
    d->threading.hier.pool.num = num_workers;
    return MHD_SC_OK;
  }

  /* Below is a clean-up: undo the 'i' workers already initialised */

  mhd_assert (MHD_SC_OK != res);
  deinit_workers_pool (d, i);
  return res;
}


/**
 * Initialise data specific only for the master daemon.
 * @param d the daemon object
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
init_master_only_data (struct MHD_Daemon *restrict d)
{
  enum MHD_StatusCode res;

  mhd_assert (mhd_D_HAS_WORKERS (d));
  mhd_assert (d->dbg.net_inited);
  mhd_assert (! d->dbg.master_only_inited);
  mhd_assert (! d->dbg.worker_only_inited);

  if (! mhd_atomic_counter_init ( \
        &(d->events.act_req.ext_added.master.next_d_idx),
        0))
  {
    mhd_LOG_MSG (d, MHD_SC_MUTEX_INIT_FAILURE, \
                 "Failed to initialise atomic counter for externally added "
                 "connections");
    res = MHD_SC_MUTEX_INIT_FAILURE;
  }
  else
    res = MHD_SC_OK;

#ifndef NDEBUG
  if (MHD_SC_OK == res)
    d->dbg.master_only_inited = true;
#endif /* ! NDEBUG */

  return res;
}


/**
 * De-initialise data specific only for the master daemon.
 * @param d the daemon object
 */
static MHD_FN_PAR_NONNULL_ (1) void
deinit_master_only_data (struct MHD_Daemon *restrict d)
{
  mhd_assert (mhd_D_HAS_WORKERS (d));
  mhd_assert (d->dbg.master_only_inited);
  mhd_assert (! d->dbg.worker_only_inited);

  mhd_atomic_counter_deinit (&(d->events.act_req.ext_added.master.next_d_idx));

#ifndef NDEBUG
  d->dbg.master_only_inited = false;
#endif /* ! NDEBUG */
}


/**
 * Initialise individual events, connection data for the "master" daemon,
 * including master-only data, the workers pool, and the workers daemons,
 * including individual worker-specific threading and other data.
 * Do not start the threads.
 * @param d the daemon object
 * @param s the user settings
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
init_master (struct MHD_Daemon *restrict d,
             struct DaemonOptions *restrict s)
{
  enum MHD_StatusCode res;
  mhd_assert (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));

  res = init_master_only_data (d);
  if (MHD_SC_OK != res)
    return res;

  res = init_workers_pool (d,
                           s);
  if (MHD_SC_OK == res)
  {
    /* Copy some settings to the master daemon */
    d->conns.cfg.mem_pool_size =
      d->threading.hier.pool.workers[0].conns.cfg.mem_pool_size;

    return res;
  }

  /* Below is a clean-up path */

  deinit_master_only_data (d);

  mhd_assert (MHD_SC_OK != res);
  return res;
}


/**
 * De-initialise individual events, connection data for the "master" daemon,
 * including master-only data, the workers pool, and the workers daemons,
 *
including individual worker-specific threading and other data.
 * The threads must not be running.
 * @param d the daemon object
 */
static MHD_FN_PAR_NONNULL_ (1) void
deinit_master (struct MHD_Daemon *restrict d)
{
  deinit_workers_pool (d,
                       d->threading.hier.pool.num);

  deinit_master_only_data (d);
}


#endif /* MHD_SUPPORT_THREADS */

/**
 * Initialise threading and inter-thread communications.
 * Also finish initialisation of events processing and initialise daemon's
 * connection data.
 * Do not start the thread even if configured for the internal threads.
 * @param d the daemon object
 * @param s the user settings
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
daemon_init_threading_and_conn (struct MHD_Daemon *restrict d,
                                struct DaemonOptions *restrict s)
{
  enum MHD_StatusCode res;

  mhd_assert (d->dbg.net_inited);
  mhd_assert (! d->dbg.net_deinited);
  mhd_assert (mhd_POLL_TYPE_NOT_SET_YET != d->events.poll_type);

  res = set_d_threading_type (d);
  if (MHD_SC_OK != res)
    return res;

  res = set_connections_total_limits (d, s);
  if (MHD_SC_OK != res)
    return res;

#ifdef MHD_SUPPORT_THREADS
  d->threading.cfg.stack_size = s->stack_size;
#endif /* MHD_SUPPORT_THREADS */

  /* Workers pool mode initialises per-worker data instead of the master's */
  if (! mhd_D_HAS_WORKERS (d))
    res = init_worker (d,
                       s);
#ifdef MHD_SUPPORT_THREADS
  else
    res = init_master (d,
                       s);
#endif /* MHD_SUPPORT_THREADS */

  if (MHD_SC_OK == res)
  {
    /* Cross-check resulting debug flags against the daemon type */
    mhd_assert (d->dbg.events_allocated || \
                mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
    mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type) || \
                ! d->dbg.events_allocated);
    mhd_assert (!
d->dbg.thread_pool_inited || \
                mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
    mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type) || \
                d->dbg.thread_pool_inited);
    mhd_assert (! mhd_D_TYPE_IS_INTERNAL_ONLY (d->threading.d_type));
    mhd_assert (! d->dbg.events_allocated || d->dbg.connections_inited);
    mhd_assert (! d->dbg.connections_inited || d->dbg.events_allocated);
  }
  return res;
}


/**
 * De-initialise threading and inter-thread communications.
 * Also deallocate events and de-initialise daemon's connection data.
 * No daemon-managed threads should be running.
 * @param d the daemon object
 */
static MHD_FN_PAR_NONNULL_ (1) void
daemon_deinit_threading_and_conn (struct MHD_Daemon *restrict d)
{
  mhd_assert (d->dbg.net_inited);
  mhd_assert (! d->dbg.net_deinited);
  mhd_assert (d->dbg.threading_inited);
  mhd_assert (! mhd_D_TYPE_IS_INTERNAL_ONLY (d->threading.d_type));
  if (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type))
  {
    /* Single-daemon modes: events and connections belong to 'd' itself */
    mhd_assert (! mhd_WM_INT_IS_THREAD_POOL (d->wmode_int));
    mhd_assert (d->dbg.connections_inited);
    mhd_assert (d->dbg.events_allocated);
    mhd_assert (! d->dbg.thread_pool_inited);
    deinit_worker (d);
  }
  else
  {
#ifdef MHD_SUPPORT_THREADS
    /* Thread pool mode: events and connections belong to the workers */
    mhd_assert (mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL == d->wmode_int);
    mhd_assert (! d->dbg.connections_inited);
    mhd_assert (! d->dbg.events_allocated);
    mhd_assert (d->dbg.thread_pool_inited);
    deinit_master (d);
#else  /* ! MHD_SUPPORT_THREADS */
    mhd_assert (0 && "Impossible value");
    mhd_UNREACHABLE ();
    (void) 0;
#endif /* ! MHD_SUPPORT_THREADS */
  }
}


#ifdef MHD_SUPPORT_THREADS

/**
 * Start the daemon individual single thread.
 * Works both for single thread daemons and for worker daemon for thread
 * pool mode.
 * Must be called only for daemons with internal threads.
 * @param d the daemon object, must be completely initialised
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
start_individual_daemon_thread (struct MHD_Daemon *restrict d)
{
  mhd_assert (d->dbg.threading_inited);
  mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int));
  mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type));
  mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
  mhd_assert (! mhd_thread_handle_ID_is_valid_handle (d->threading.tid));

  /* The thread entry point and its name depend on the daemon type */
  if (mhd_DAEMON_TYPE_SINGLE == d->threading.d_type)
  {
    if (! mhd_create_named_thread ( \
          &(d->threading.tid), "MHD-single", \
          d->threading.cfg.stack_size, \
          &mhd_worker_all_events, \
          (void*) d))
    {
      mhd_LOG_MSG (d, MHD_SC_THREAD_MAIN_LAUNCH_FAILURE, \
                   "Failed to start daemon main thread.");
      return MHD_SC_THREAD_MAIN_LAUNCH_FAILURE;
    }
  }
  else if (mhd_DAEMON_TYPE_WORKER == d->threading.d_type)
  {
    if (! mhd_create_named_thread ( \
          &(d->threading.tid), "MHD-worker", \
          d->threading.cfg.stack_size, \
          &mhd_worker_all_events, \
          (void*) d))
    {
      mhd_LOG_MSG (d, MHD_SC_THREAD_WORKER_LAUNCH_FAILURE, \
                   "Failed to start daemon worker thread.");
      return MHD_SC_THREAD_WORKER_LAUNCH_FAILURE;
    }
  }
  else if (mhd_DAEMON_TYPE_LISTEN_ONLY == d->threading.d_type)
  {
    if (!
mhd_create_named_thread ( \
          &(d->threading.tid), "MHD-listen", \
          d->threading.cfg.stack_size, \
          &mhd_worker_listening_only, \
          (void*) d))
    {
      mhd_LOG_MSG (d, MHD_SC_THREAD_LISTENING_LAUNCH_FAILURE, \
                   "Failed to start daemon listening thread.");
      return MHD_SC_THREAD_LISTENING_LAUNCH_FAILURE;
    }
  }
  else
  {
    mhd_assert (0 && "Impossible value");
    mhd_UNREACHABLE ();
    return MHD_SC_INTERNAL_ERROR;
  }
  mhd_assert (mhd_thread_handle_ID_is_valid_handle (d->threading.tid));
  return MHD_SC_OK;
}


/**
 * Stop the daemon individual single thread.
 * Works both for single thread daemons and for worker daemon for thread
 * pool mode.
 * Must be called only for daemons with internal threads.
 * @param d the daemon object, must be completely initialised
 */
MHD_FN_PAR_NONNULL_ (1) static void
stop_individual_daemon_thread (struct MHD_Daemon *restrict d)
{
  mhd_assert (d->dbg.threading_inited);
  mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int));
  mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type));
  mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
  mhd_assert ((mhd_DAEMON_STATE_STOPPING == d->state) || \
              (mhd_DAEMON_STATE_STARTING == d->state));
  mhd_assert (mhd_thread_handle_ID_is_valid_handle (d->threading.tid));

  d->threading.stop_requested = true;

  /* Wake the thread so it notices the stop request, then join it */
  mhd_daemon_trigger_itc (d);
  if (! mhd_thread_handle_ID_join_thread (d->threading.tid))
  {
    mhd_LOG_MSG (d, MHD_SC_DAEMON_THREAD_STOP_ERROR, \
                 "Failed to stop daemon main thread.");
  }
}


/**
 * Stop all worker threads in the thread pool.
 * Must be called only for master daemons with thread pool.
 * @param d the daemon object, the workers threads must be running
 * @param num_workers the number of threads to stop
 */
static MHD_FN_PAR_NONNULL_ (1) void
stop_worker_pool_threads (struct MHD_Daemon *restrict d,
                          unsigned int num_workers)
{
  unsigned int i;
  mhd_assert (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
  mhd_assert (NULL != d->threading.hier.pool.workers);
  mhd_assert (0 != d->threading.hier.pool.num);
  mhd_assert (d->dbg.thread_pool_inited);
  mhd_assert (2 <= d->threading.hier.pool.num);
  mhd_assert ((num_workers == d->threading.hier.pool.num) || \
              (mhd_DAEMON_STATE_STARTING == d->state));
  mhd_assert ((mhd_DAEMON_STATE_STOPPING == d->state) || \
              (mhd_DAEMON_STATE_STARTING == d->state));

  /* Process all the threads in the reverse order */

  /* Trigger all threads first so they can shut down in parallel */
  for (i = num_workers - 1; num_workers > i; --i)
  { /* Note: 'i' is unsigned; the loop exits after underflow of 'i' */
    d->threading.hier.pool.workers[i].threading.stop_requested = true;
    mhd_assert (mhd_ITC_IS_VALID ( \
                  d->threading.hier.pool.workers[i].threading.itc));
    mhd_daemon_trigger_itc (d->threading.hier.pool.workers + i);
  }

  /* Collect (join) all threads */
  for (i = num_workers - 1; num_workers > i; --i)
  { /* Note: loop exits after underflow of 'i' */
    struct MHD_Daemon *const restrict worker =
      d->threading.hier.pool.workers + i;
    mhd_assert (mhd_thread_handle_ID_is_valid_handle (worker->threading.tid));
    if (! mhd_thread_handle_ID_join_thread (worker->threading.tid))
    {
      mhd_LOG_MSG (d, MHD_SC_DAEMON_THREAD_STOP_ERROR, \
                   "Failed to stop a worker thread.");
    }
  }
}


/**
 * Start the workers pool threads.
 * Must be called only for master daemons with thread pool.
 * @param d the daemon object, must be completely initialised
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
start_worker_pool_threads (struct MHD_Daemon *restrict d)
{
  enum MHD_StatusCode res;
  unsigned int i;

  mhd_assert (d->dbg.threading_inited);
  mhd_assert (mhd_WM_INT_HAS_THREADS (d->wmode_int));
  mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type));
  mhd_assert (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
  mhd_assert (d->dbg.thread_pool_inited);
  mhd_assert (2 <= d->threading.hier.pool.num);

  res = MHD_SC_OK;

  for (i = 0; d->threading.hier.pool.num > i; ++i)
  {
    res = start_individual_daemon_thread (d->threading.hier.pool.workers + i);
    if (MHD_SC_OK != res)
      break;
  }
  if (d->threading.hier.pool.num == i)
  {
    mhd_assert (MHD_SC_OK == res);
    return MHD_SC_OK;
  }

  /* Failure: stop the 'i' worker threads already started */
  stop_worker_pool_threads (d, i);
  mhd_assert (MHD_SC_OK != res);
  return res;
}


#endif /* MHD_SUPPORT_THREADS */

/**
 * Start the daemon internal threads, if the daemon configured to use them.
 * @param d the daemon object, must be completely initialised
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
daemon_start_threads (struct MHD_Daemon *restrict d)
{
  mhd_assert (d->dbg.net_inited);
  mhd_assert (! d->dbg.net_deinited);
  mhd_assert (d->dbg.threading_inited);
  mhd_assert (! mhd_D_TYPE_IS_INTERNAL_ONLY (d->threading.d_type));
  if (mhd_WM_INT_HAS_THREADS (d->wmode_int))
  {
#ifdef MHD_SUPPORT_THREADS
    if (mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL != d->wmode_int)
    {
      mhd_assert (d->dbg.threading_inited);
      mhd_assert (mhd_DAEMON_TYPE_MASTER_CONTROL_ONLY != d->threading.d_type);
      return start_individual_daemon_thread (d);
    }
    else
    {
      mhd_assert (d->dbg.thread_pool_inited);
      mhd_assert (mhd_DAEMON_TYPE_MASTER_CONTROL_ONLY == d->threading.d_type);
      return start_worker_pool_threads (d);
    }
#else  /* ! MHD_SUPPORT_THREADS */
    mhd_assert (0 && "Impossible value");
    mhd_UNREACHABLE ();
    return MHD_SC_INTERNAL_ERROR;
#endif /* ! MHD_SUPPORT_THREADS */
  }
  /* External events modes: no internal threads to start */
  return MHD_SC_OK;
}


/**
 * Stop the daemon internal threads, if the daemon configured to use them.
 * @param d the daemon object to stop threads
 */
static MHD_FN_PAR_NONNULL_ (1) void
daemon_stop_threads (struct MHD_Daemon *restrict d)
{
  mhd_assert (d->dbg.net_inited);
  mhd_assert (! d->dbg.net_deinited);
  mhd_assert (d->dbg.threading_inited);
  if (mhd_WM_INT_HAS_THREADS (d->wmode_int))
  {
#ifdef MHD_SUPPORT_THREADS
    if (mhd_WM_INT_INTERNAL_EVENTS_THREAD_POOL != d->wmode_int)
    {
      mhd_assert (d->dbg.threading_inited);
      mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
      stop_individual_daemon_thread (d);
      return;
    }
    else
    {
      mhd_assert (d->dbg.thread_pool_inited);
      mhd_assert (mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
      stop_worker_pool_threads (d, d->threading.hier.pool.num);
      return;
    }
#else  /* ! MHD_SUPPORT_THREADS */
    mhd_UNREACHABLE ();
    return;
#endif /* !
MHD_SUPPORT_THREADS */
  }
}


/**
 * Close all daemon connections for modes without internal threads.
 * @param d the daemon object
 */
static MHD_FN_PAR_NONNULL_ (1) void
daemon_close_connections (struct MHD_Daemon *restrict d)
{
  if (mhd_WM_INT_HAS_THREADS (d->wmode_int))
  {
    /* In these modes connections must be closed in the daemon thread */
    mhd_assert (NULL == mhd_DLINKEDL_GET_LAST (&(d->conns),all_conn));
    return;
  }

  mhd_daemon_close_all_conns (d);
}


/**
 * Internal daemon initialisation function.
 * This function calls all required initialisation stages one-by-one.
 * On failure, every already-completed stage is rolled back in reverse order.
 * @param d the daemon object
 * @param s the user settings
 * @return #MHD_SC_OK on success,
 *         the error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) MHD_FN_PAR_NONNULL_ (2)
MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
daemon_start_internal (struct MHD_Daemon *restrict d,
                       struct DaemonOptions *restrict s)
{
  enum MHD_StatusCode res;

  res = daemon_set_basic_settings (d, s);
  if (MHD_SC_OK != res)
    return res;

  res = daemon_set_work_mode (d, s);
  if (MHD_SC_OK != res)
    return res;

  res = daemon_init_net (d, s);
  if (MHD_SC_OK != res)
    return res;

  mhd_assert (d->dbg.net_inited);

  res = daemon_init_auth_digest (d, s);

  if (MHD_SC_OK == res)
  {
    res = daemon_init_tls (d, s);
    if (MHD_SC_OK == res)
    {
      mhd_assert (d->dbg.tls_inited);
      res = daemon_init_threading_and_conn (d, s);
      if (MHD_SC_OK == res)
      {
        mhd_assert (d->dbg.threading_inited);
        mhd_assert (! mhd_D_TYPE_IS_INTERNAL_ONLY (d->threading.d_type));

        res = daemon_init_large_buf (d, s);
        if (MHD_SC_OK == res)
        {
          res = daemon_start_threads (d);
          if (MHD_SC_OK == res)
          {
            return MHD_SC_OK;
          }

          /* Below is a clean-up path */
          daemon_deinit_large_buf (d);
        }
        daemon_deinit_threading_and_conn (d);
      }
      daemon_deinit_tls (d);
    }
    daemon_deinit_auth_digest (d);
  }
  daemon_deinit_net (d);
  mhd_assert (MHD_SC_OK != res);
  return res;
}


MHD_EXTERN_
MHD_FN_PAR_NONNULL_ (1) MHD_FN_MUST_CHECK_RESULT_ enum MHD_StatusCode
MHD_daemon_start (struct MHD_Daemon *daemon)
{
  struct MHD_Daemon *const d = daemon; /* a short alias */
  struct DaemonOptions *const s = daemon->settings; /* a short alias */
  enum MHD_StatusCode res;

  if (mhd_DAEMON_STATE_NOT_STARTED != daemon->state)
    return MHD_SC_TOO_LATE;

  mhd_assert (NULL != s);

  d->state = mhd_DAEMON_STATE_STARTING;
  res = daemon_start_internal (d, s);

  /* The settings are consumed by the start attempt, successful or not */
  d->settings = NULL;
  dsettings_release (s);

  d->state =
    (MHD_SC_OK == res) ? mhd_DAEMON_STATE_STARTED : mhd_DAEMON_STATE_FAILED;

  return res;
}


MHD_EXTERN_ MHD_FN_PAR_NONNULL_ALL_ void
MHD_daemon_destroy (struct MHD_Daemon *daemon)
{
  bool not_yet_started = (mhd_DAEMON_STATE_NOT_STARTED == daemon->state);
  bool has_failed = (mhd_DAEMON_STATE_FAILED == daemon->state);
  mhd_assert (mhd_DAEMON_STATE_STOPPING > daemon->state);
  mhd_assert (mhd_DAEMON_STATE_STARTING != daemon->state);

  daemon->state = mhd_DAEMON_STATE_STOPPING;
  if (not_yet_started)
  {
    /* Never started: only the settings need to be released */
    mhd_assert (NULL != daemon->settings);
    dsettings_release (daemon->settings);
  }
  else if (! has_failed)
  {
    /* Fully started: tear down in reverse order of initialisation.
       A FAILED daemon was already cleaned up by daemon_start_internal(). */
    mhd_assert (NULL == daemon->settings);
    mhd_assert (daemon->dbg.threading_inited);

    daemon_stop_threads (daemon);

    daemon_close_connections (daemon);

    daemon_deinit_threading_and_conn (daemon);

    daemon_deinit_large_buf (daemon);

    daemon_deinit_tls (daemon);

    daemon_deinit_auth_digest (daemon);

    daemon_deinit_net (daemon);
  }
  daemon->state = mhd_DAEMON_STATE_STOPPED; /* Useful only for debugging */

  free (daemon);

  mhd_lib_deinit_global_if_needed ();
}