events_process.c (68500B)
1 /* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */ 2 /* 3 This file is part of GNU libmicrohttpd. 4 Copyright (C) 2024 Evgeny Grin (Karlson2k) 5 6 GNU libmicrohttpd is free software; you can redistribute it and/or 7 modify it under the terms of the GNU Lesser General Public 8 License as published by the Free Software Foundation; either 9 version 2.1 of the License, or (at your option) any later version. 10 11 GNU libmicrohttpd is distributed in the hope that it will be useful, 12 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 Lesser General Public License for more details. 15 16 Alternatively, you can redistribute GNU libmicrohttpd and/or 17 modify it under the terms of the GNU General Public License as 18 published by the Free Software Foundation; either version 2 of 19 the License, or (at your option) any later version, together 20 with the eCos exception, as follows: 21 22 As a special exception, if other files instantiate templates or 23 use macros or inline functions from this file, or you compile this 24 file and link it with other works to produce a work based on this 25 file, this file does not by itself cause the resulting work to be 26 covered by the GNU General Public License. However the source code 27 for this file must still be made available in accordance with 28 section (3) of the GNU General Public License v2. 29 30 This exception does not invalidate any other reasons why a work 31 based on this file might be covered by the GNU General Public 32 License. 33 34 You should have received copies of the GNU Lesser General Public 35 License and the GNU General Public License along with this library; 36 if not, see <https://www.gnu.org/licenses/>. 
37 */ 38 39 /** 40 * @file src/mhd2/events_process.c 41 * @brief The implementation of events processing functions 42 * @author Karlson2k (Evgeny Grin) 43 */ 44 45 #include "mhd_sys_options.h" 46 #include "events_process.h" 47 48 #include "mhd_assert.h" 49 #include "mhd_unreachable.h" 50 51 #include "mhd_predict.h" 52 53 #if defined(mhd_DEBUG_SUSPEND_RESUME) || defined(mhd_DEBUG_POLLING_FDS) 54 # include <stdio.h> 55 #endif /* mhd_DEBUG_SUSPEND_RESUME */ 56 57 #include "mhd_locks.h" 58 59 #include "mhd_socket_type.h" 60 #include "sys_poll.h" 61 #include "sys_select.h" 62 #ifdef MHD_SUPPORT_EPOLL 63 # include <sys/epoll.h> 64 #endif 65 #ifdef MHD_SOCKETS_KIND_POSIX 66 # include "sys_errno.h" 67 #endif 68 69 #include "mhd_itc.h" 70 71 #include "mhd_panic.h" 72 #include "mhd_dbg_print.h" 73 74 #include "mhd_sockets_macros.h" 75 76 #include "mhd_daemon.h" 77 #include "mhd_connection.h" 78 79 #include "mhd_mono_clock.h" 80 81 #include "conn_timeout.h" 82 #include "conn_mark_ready.h" 83 #include "daemon_logger.h" 84 #include "daemon_add_conn.h" 85 #include "daemon_funcs.h" 86 #include "conn_data_process.h" 87 #include "stream_funcs.h" 88 #include "extr_events_funcs.h" 89 90 #ifdef MHD_SUPPORT_UPGRADE 91 # include "upgrade_proc.h" 92 #endif /* MHD_SUPPORT_UPGRADE */ 93 94 #ifdef MHD_SUPPORT_HTTPS 95 # include "mhd_tls_funcs.h" 96 #endif 97 98 #ifdef MHD_SUPPORT_HTTP2 99 # include "h2/h2_comm.h" 100 #endif 101 102 #include "mhd_public_api.h" 103 104 #ifdef mhd_DEBUG_POLLING_FDS 105 /** 106 * Debug-printf request of FD polling/monitoring 107 * @param fd_name the name of FD ("ITC", "lstn" or "conn") 108 * @param fd the FD value 109 * @param r_ready the request for read (or receive) readiness 110 * @param w_ready the request for write (or send) readiness 111 * @param e_ready the request for exception (or error) readiness 112 */ 113 MHD_INTERNAL MHD_FN_PAR_NONNULL_ALL_ void 114 mhd_dbg_print_fd_mon_req (const char *fd_name, 115 MHD_Socket fd, 116 bool r_ready, 117 bool 
w_ready, 118 bool e_ready) 119 { 120 char state_str[] = "x:x:x"; 121 state_str[0] = r_ready ? 'R' : '-'; 122 state_str[2] = w_ready ? 'W' : '-'; 123 state_str[4] = e_ready ? 'E' : '-'; 124 125 fprintf (stderr, 126 "### Set FD watching: %4s [%2llu] for %s\n", 127 fd_name, 128 (unsigned long long) fd, 129 state_str); 130 } 131 132 133 /** 134 * Debug-printf reported (by polling) status of FD 135 * @param fd_name the name of FD ("ITC", "lstn" or "conn") 136 * @param fd the FD value 137 * @param r_ready the read (or receive) readiness 138 * @param w_ready the write (or send) readiness 139 * @param e_ready the exception (or error) readiness 140 */ 141 static MHD_FN_PAR_NONNULL_ALL_ void 142 dbg_print_fd_state_update (const char *fd_name, 143 MHD_Socket fd, 144 bool r_ready, 145 bool w_ready, 146 bool e_ready) 147 { 148 char state_str[] = "x:x:x"; 149 state_str[0] = r_ready ? 'R' : '-'; 150 state_str[2] = w_ready ? 'W' : '-'; 151 state_str[4] = e_ready ? 'E' : '-'; 152 153 fprintf (stderr, 154 "### FD state update: %4s [%2llu] -> %s\n", 155 fd_name, 156 (unsigned long long) fd, 157 state_str); 158 } 159 160 161 #else /* ! mhd_DEBUG_POLLING_FDS */ 162 # define dbg_print_fd_state_update(fd_n,fd,r_ready,w_ready,e_ready) \ 163 ((void) 0) 164 #endif /* ! 
mhd_DEBUG_POLLING_FDS */ 165 166 #ifdef MHD_SUPPORT_THREADS 167 /** 168 * Log error message about broken ITC 169 * @param d the daemon to use 170 */ 171 static MHD_FN_PAR_NONNULL_ALL_ void 172 log_itc_broken (struct MHD_Daemon *restrict d) 173 { 174 mhd_LOG_MSG (d, \ 175 MHD_SC_ITC_STATUS_ERROR, \ 176 "System reported that ITC has an error status or broken."); 177 } 178 179 180 #endif /* MHD_SUPPORT_THREADS */ 181 182 /** 183 * Log error message about broken listen socket 184 * @param d the daemon to use 185 */ 186 static MHD_FN_PAR_NONNULL_ALL_ void 187 log_listen_broken (struct MHD_Daemon *restrict d) 188 { 189 mhd_LOG_MSG (d, MHD_SC_LISTEN_STATUS_ERROR, \ 190 "System reported that the listening socket has an error " \ 191 "status or broken. The daemon will not listen any more."); 192 } 193 194 195 static MHD_FN_PAR_NONNULL_ALL_ uint_fast64_t 196 mhd_daemon_get_wait_erliest_timeout (const struct MHD_Daemon *restrict d) 197 { 198 uint_fast64_t ret; 199 uint_fast64_t cur_milsec; 200 const struct MHD_Connection *c; 201 202 c = mhd_DLINKEDL_GET_LAST_D (&(d->conns.def_timeout)); 203 if ((NULL == c) 204 && (NULL == mhd_DLINKEDL_GET_LAST_D (&(d->conns.cust_timeout)))) 205 return MHD_WAIT_INDEFINITELY; 206 207 /* Do not use mhd_daemon_get_milsec_counter() as actual time is required 208 here */ 209 cur_milsec = mhd_monotonic_msec_counter (); 210 211 /* Check just the first connection in the ordered "default timeout" list */ 212 if (NULL != c) 213 ret = mhd_conn_get_timeout_left (c, 214 cur_milsec); 215 else 216 ret = MHD_WAIT_INDEFINITELY; 217 218 for (c = mhd_DLINKEDL_GET_LAST_D (&(d->conns.cust_timeout)); 219 (NULL != c) && (0u != ret); 220 c = mhd_DLINKEDL_GET_PREV (&(c->timeout), 221 tmout_list)) 222 { 223 uint_fast64_t conn_tmout_left; 224 conn_tmout_left = mhd_conn_get_timeout_left (c, 225 cur_milsec); 226 if (ret > conn_tmout_left) 227 ret = conn_tmout_left; 228 } 229 230 return ret; 231 } 232 233 234 MHD_INTERNAL MHD_FN_PAR_NONNULL_ALL_ uint_fast64_t 235 
mhd_daemon_get_wait_max (const struct MHD_Daemon *restrict d) 236 { 237 uint_fast64_t ret; 238 239 mhd_assert (! mhd_D_HAS_WORKERS (d)); 240 241 if (d->events.accept_pending && ! d->conns.block_new) 242 { 243 #ifdef mhd_DEBUG_POLLING_FDS 244 fprintf (stderr, 245 "### mhd_daemon_get_wait_max(daemon) -> zero " 246 "(accept new conn pending)\n"); 247 #endif 248 return 0; 249 } 250 if (d->events.act_req.resume) 251 { 252 #ifdef mhd_DEBUG_POLLING_FDS 253 fprintf (stderr, 254 "### mhd_daemon_get_wait_max(daemon) -> zero " 255 "(resume connection pending)\n"); 256 #endif 257 return 0; 258 } 259 if (NULL != mhd_DLINKEDL_GET_FIRST (&(d->events), proc_ready)) 260 { 261 #ifdef mhd_DEBUG_POLLING_FDS 262 fprintf (stderr, 263 "### mhd_daemon_get_wait_max(daemon) -> zero " 264 "(connection(s) is already ready)\n"); 265 #endif 266 return 0; 267 } 268 if (NULL != mhd_DLINKEDL_GET_FIRST (&(d->events.act_req.ext_added.worker), 269 queue)) 270 { 271 #ifdef mhd_DEBUG_POLLING_FDS 272 fprintf (stderr, 273 "### mhd_daemon_get_wait_max(daemon) -> zero " 274 "(externally added connection(s) pending)\n"); 275 #endif 276 return 0; 277 } 278 279 ret = mhd_daemon_get_wait_erliest_timeout (d); 280 281 #ifdef mhd_DEBUG_POLLING_FDS 282 if (MHD_WAIT_INDEFINITELY == ret) 283 fprintf (stderr, 284 "### mhd_daemon_get_wait_max(daemon) -> MHD_WAIT_INDEFINITELY\n"); 285 else 286 fprintf (stderr, 287 "### mhd_daemon_get_wait_max(daemon) -> %lu\n", 288 (unsigned long) ret); 289 #endif 290 291 return ret; 292 } 293 294 295 static MHD_FN_PAR_NONNULL_ALL_ void 296 start_resuming_connection (struct MHD_Connection *restrict c, 297 struct MHD_Daemon *restrict d) 298 { 299 mhd_assert (c->suspended); 300 #ifdef mhd_DEBUG_SUSPEND_RESUME 301 fprintf (stderr, 302 "%%%%%% Resuming connection, FD: %2llu\n", 303 (unsigned long long) c->sk.fd); 304 #endif /* mhd_DEBUG_SUSPEND_RESUME */ 305 c->suspended = false; 306 mhd_conn_init_activity_timeout (c, 307 c->timeout.milsec); 308 mhd_conn_mark_ready (c, d); /* Force 
processing connection in this round */ 309 } 310 311 312 /** 313 * Check whether any resuming connections are pending and resume them 314 * @param d the daemon to use 315 */ 316 static MHD_FN_PAR_NONNULL_ALL_ void 317 daemon_resume_conns_if_needed (struct MHD_Daemon *restrict d) 318 { 319 struct MHD_Connection *c; 320 321 if (! d->events.act_req.resume) 322 return; 323 324 d->events.act_req.resume = false; /* Reset flag before processing data */ 325 326 for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn); 327 NULL != c; 328 c = mhd_DLINKEDL_GET_NEXT (c,all_conn)) 329 { 330 if (c->resuming) 331 start_resuming_connection (c, d); 332 } 333 } 334 335 336 #if defined (MHD_SUPPORT_POLL) || defined(MHD_SUPPORT_EPOLL) 337 338 mhd_DATA_TRUNCATION_RUNTIME_CHECK_DISABLE 339 340 static MHD_FN_PAR_NONNULL_ALL_ int 341 get_max_wait (const struct MHD_Daemon *restrict d) 342 { 343 const uint_fast64_t ui64_wait = mhd_daemon_get_wait_max (d); 344 int i_wait = (int) ui64_wait; 345 346 if (MHD_WAIT_INDEFINITELY <= ui64_wait) 347 return -1; 348 349 if (mhd_COND_ALMOST_NEVER ((0 > i_wait) || 350 (ui64_wait != (uint_fast64_t) i_wait))) 351 return INT_MAX; 352 353 return i_wait; 354 } 355 356 357 mhd_DATA_TRUNCATION_RUNTIME_CHECK_RESTORE 358 /* End of warning-less data truncation */ 359 360 #endif 361 /* MHD_SUPPORT_POLL || MHD_SUPPORT_EPOLL */ 362 363 364 MHD_FN_PAR_NONNULL_ (1) static void 365 update_conn_net_status (struct MHD_Daemon *restrict d, 366 struct MHD_Connection *restrict c, 367 bool recv_ready, 368 bool send_ready, 369 bool err_state) 370 { 371 enum mhd_SocketNetState sk_state; 372 373 mhd_assert (d == c->daemon); 374 /* "resuming" must be not processed yet */ 375 mhd_assert (! 
c->resuming || c->suspended); 376 377 dbg_print_fd_state_update ("conn", \ 378 c->sk.fd, \ 379 recv_ready, \ 380 send_ready, \ 381 err_state); 382 383 sk_state = mhd_SOCKET_NET_STATE_NOTHING; 384 if (recv_ready) 385 sk_state = (enum mhd_SocketNetState) 386 (sk_state | (unsigned int) mhd_SOCKET_NET_STATE_RECV_READY); 387 if (send_ready) 388 sk_state = (enum mhd_SocketNetState) 389 (sk_state | (unsigned int) mhd_SOCKET_NET_STATE_SEND_READY); 390 if (err_state) 391 sk_state = (enum mhd_SocketNetState) 392 (sk_state | (unsigned int) mhd_SOCKET_NET_STATE_ERROR_READY); 393 c->sk.ready = sk_state; 394 395 if (! c->suspended) 396 mhd_conn_mark_ready_update3 (c, err_state, d); 397 else 398 mhd_assert (! c->in_proc_ready); 399 } 400 401 402 /** 403 * Accept new connections on the daemon 404 * @param d the daemon to use 405 * @return true if all incoming connections has been accepted, 406 * false if some connection may still wait to be accepted 407 */ 408 MHD_FN_PAR_NONNULL_ (1) static bool 409 daemon_accept_new_conns (struct MHD_Daemon *restrict d) 410 { 411 unsigned int num_to_accept; 412 mhd_assert (MHD_INVALID_SOCKET != d->net.listen.fd); 413 mhd_assert (! d->net.listen.is_broken); 414 mhd_assert (! d->conns.block_new); 415 mhd_assert (d->conns.count < d->conns.cfg.count_limit); 416 mhd_assert (! mhd_D_HAS_WORKERS (d)); 417 418 if (! d->net.listen.non_block) 419 num_to_accept = 1; /* listen socket is blocking, only one connection can be processed */ 420 else 421 { 422 const unsigned int slots_left = d->conns.cfg.count_limit - d->conns.count; 423 if (! mhd_D_HAS_MASTER (d)) 424 { 425 /* Fill up to one quarter of allowed limit in one turn */ 426 num_to_accept = d->conns.cfg.count_limit / 4; 427 /* Limit to a reasonable number */ 428 if (((sizeof(void *) > 4) ? 4096 : 1024) < num_to_accept) 429 num_to_accept = ((sizeof(void *) > 4) ? 
4096 : 1024); 430 if (slots_left < num_to_accept) 431 num_to_accept = slots_left; 432 } 433 #ifdef MHD_SUPPORT_THREADS 434 else 435 { 436 /* Has workers thread pool. Care must be taken to evenly distribute 437 new connections in the workers pool. 438 At the same time, the burst of new connections should be handled as 439 quick as possible. */ 440 const unsigned int num_conn = d->conns.count; 441 const unsigned int limit = d->conns.cfg.count_limit; 442 const unsigned int num_workers = 443 d->threading.hier.master->threading.hier.pool.num; 444 if (num_conn < limit / 16) 445 { 446 num_to_accept = num_conn / num_workers; 447 if (8 > num_to_accept) 448 { 449 if (8 > slots_left / 16) 450 num_to_accept = slots_left / 16; 451 else 452 num_to_accept = 8; 453 } 454 if (64 < num_to_accept) 455 num_to_accept = 64; 456 } 457 else if (num_conn < limit / 8) 458 { 459 num_to_accept = num_conn * 2 / num_workers; 460 if (8 > num_to_accept) 461 { 462 if (8 > slots_left / 8) 463 num_to_accept = slots_left / 8; 464 else 465 num_to_accept = 8; 466 } 467 if (128 < num_to_accept) 468 num_to_accept = 128; 469 } 470 else if (num_conn < limit / 4) 471 { 472 num_to_accept = num_conn * 4 / num_workers; 473 if (8 > num_to_accept) 474 num_to_accept = 8; 475 if (slots_left / 4 < num_to_accept) 476 num_to_accept = slots_left / 4; 477 if (256 < num_to_accept) 478 num_to_accept = 256; 479 } 480 else if (num_conn < limit / 2) 481 { 482 num_to_accept = num_conn * 8 / num_workers; 483 if (16 > num_to_accept) 484 num_to_accept = 16; 485 if (slots_left / 4 < num_to_accept) 486 num_to_accept = slots_left / 4; 487 if (256 < num_to_accept) 488 num_to_accept = 256; 489 } 490 else if (slots_left > limit / 4) 491 { 492 num_to_accept = slots_left * 4 / num_workers; 493 if (slots_left / 8 < num_to_accept) 494 num_to_accept = slots_left / 8; 495 if (128 < num_to_accept) 496 num_to_accept = 128; 497 } 498 else if (slots_left > limit / 8) 499 { 500 num_to_accept = slots_left * 2 / num_workers; 501 if (slots_left / 
16 < num_to_accept) 502 num_to_accept = slots_left / 16; 503 if (64 < num_to_accept) 504 num_to_accept = 64; 505 } 506 else /* (slots_left <= limit / 8) */ 507 num_to_accept = slots_left / 16; 508 509 if (0 == num_to_accept) 510 num_to_accept = 1; 511 else if (slots_left > num_to_accept) 512 num_to_accept = slots_left; 513 } 514 #endif /* MHD_SUPPORT_THREADS */ 515 } 516 517 while (0 != --num_to_accept) 518 { 519 enum mhd_DaemonAcceptResult res; 520 res = mhd_daemon_accept_connection (d); 521 if (mhd_DAEMON_ACCEPT_NO_MORE_PENDING == res) 522 return true; 523 if (mhd_DAEMON_ACCEPT_FAILED == res) 524 return false; /* This is probably "no system resources" error. 525 To do try to accept more connections now. */ 526 } 527 return false; /* More connections may need to be accepted */ 528 } 529 530 531 /** 532 * Check whether particular connection should be excluded from standard HTTP 533 * communication. 534 * @param c the connection the check 535 * @return 'true' if connection should not be used for HTTP communication 536 * 'false' if connection should be processed as HTTP 537 */ 538 mhd_static_inline MHD_FN_PAR_NONNULL_ALL_ bool 539 is_conn_excluded_from_http_comm (struct MHD_Connection *restrict c) 540 { 541 #ifdef MHD_SUPPORT_UPGRADE 542 if (NULL != c->upgr.c) 543 { 544 mhd_assert ((mhd_HTTP_STAGE_UPGRADED == c->stage) || \ 545 (mhd_HTTP_STAGE_UPGRADED_CLEANING == c->stage)); 546 return true; 547 } 548 #endif /* MHD_SUPPORT_UPGRADE */ 549 550 return c->suspended; 551 } 552 553 554 static bool 555 daemon_process_all_active_conns (struct MHD_Daemon *restrict d) 556 { 557 struct MHD_Connection *c; 558 mhd_assert (! mhd_D_HAS_WORKERS (d)); 559 560 c = mhd_DLINKEDL_GET_FIRST (&(d->events),proc_ready); 561 while (NULL != c) 562 { 563 struct MHD_Connection *next; 564 /* The current connection can be closed or removed from 565 "ready" list */ 566 next = mhd_DLINKEDL_GET_NEXT (c, proc_ready); 567 if (! 
mhd_conn_process_recv_send_data (c)) 568 { 569 mhd_conn_pre_clean (c); 570 mhd_conn_remove_from_daemon (c); 571 mhd_conn_close_final (c); 572 } 573 else 574 { 575 mhd_assert (! c->resuming || c->suspended); 576 } 577 578 c = next; 579 } 580 return true; 581 } 582 583 584 #ifdef MHD_SUPPORT_UPGRADE 585 /** 586 * Clean-up all HTTP-Upgraded connections scheduled for clean-up 587 * @param d the daemon to process 588 */ 589 static MHD_FN_PAR_NONNULL_ALL_ void 590 daemon_cleanup_upgraded_conns (struct MHD_Daemon *d) 591 { 592 volatile struct MHD_Daemon *voltl_d = d; 593 mhd_assert (! mhd_D_HAS_WORKERS (d)); 594 595 if (NULL == mhd_DLINKEDL_GET_FIRST (&(voltl_d->conns.upgr), upgr_cleanup)) 596 return; 597 598 while (true) 599 { 600 struct MHD_Connection *c; 601 602 mhd_mutex_lock_chk (&(d->conns.upgr.ucu_lock)); 603 c = mhd_DLINKEDL_GET_FIRST (&(d->conns.upgr), upgr_cleanup); 604 if (NULL != c) 605 mhd_DLINKEDL_DEL (&(d->conns.upgr), c, upgr_cleanup); 606 mhd_mutex_unlock_chk (&(d->conns.upgr.ucu_lock)); 607 608 if (NULL == c) 609 break; 610 611 mhd_assert (mhd_HTTP_STAGE_UPGRADED_CLEANING == c->stage); 612 mhd_upgraded_deinit (c); 613 mhd_conn_pre_clean (c); 614 mhd_conn_remove_from_daemon (c); 615 mhd_conn_close_final (c); 616 } 617 } 618 619 620 #else /* ! MHD_SUPPORT_UPGRADE */ 621 #define daemon_cleanup_upgraded_conns(d) ((void) d) 622 #endif /* ! MHD_SUPPORT_UPGRADE */ 623 624 MHD_INTERNAL MHD_FN_PAR_NONNULL_ALL_ void 625 mhd_daemon_close_all_conns (struct MHD_Daemon *d) 626 { 627 struct MHD_Connection *c; 628 bool has_upgraded_unclosed; 629 630 has_upgraded_unclosed = false; 631 if (! 
mhd_D_HAS_THR_PER_CONN (d)) 632 { 633 for (c = mhd_DLINKEDL_GET_LAST (&(d->conns),all_conn); 634 NULL != c; 635 c = mhd_DLINKEDL_GET_LAST (&(d->conns),all_conn)) 636 { 637 #ifdef MHD_SUPPORT_UPGRADE 638 mhd_assert (mhd_HTTP_STAGE_UPGRADING != c->stage); 639 mhd_assert (mhd_HTTP_STAGE_UPGRADED_CLEANING != c->stage); 640 if (NULL != c->upgr.c) 641 { 642 mhd_assert (c == c->upgr.c); 643 has_upgraded_unclosed = true; 644 mhd_upgraded_deinit (c); 645 } 646 else /* Combined with the next 'if' */ 647 #endif 648 if (1) 649 { 650 #ifdef MHD_SUPPORT_HTTP2 651 if (mhd_C_IS_HTTP2 (c)) 652 mhd_h2_conn_h2_deinit_start_closing (c); 653 else 654 #endif /* MHD_SUPPORT_HTTP2 */ 655 mhd_conn_start_closing_d_shutdown (c); 656 } 657 mhd_conn_pre_clean (c); 658 mhd_conn_remove_from_daemon (c); 659 mhd_conn_close_final (c); 660 } 661 } 662 else 663 mhd_assert (0 && "Not implemented yet"); 664 665 if (has_upgraded_unclosed) 666 mhd_LOG_MSG (d, MHD_SC_DAEMON_DESTROYED_WITH_UNCLOSED_UPGRADED, \ 667 "The daemon is being destroyed, but at least one " \ 668 "HTTP-Upgraded connection is unclosed. Any use (including " \ 669 "closing) of such connections is undefined behaviour."); 670 } 671 672 673 /** 674 * Process all external events updated of existing connections, information 675 * about new connections pending to be accept()'ed, presence of the events on 676 * the daemon's ITC; resume connections. 
677 * @return 'true' if processed successfully, 678 * 'false' is unrecoverable error occurs and the daemon must be 679 * closed 680 */ 681 static MHD_FN_PAR_NONNULL_ (1) bool 682 ext_events_process_net_updates_and_resume_conn (struct MHD_Daemon *restrict d) 683 { 684 struct MHD_Connection *restrict c; 685 686 mhd_assert (mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int)); 687 mhd_assert (mhd_POLL_TYPE_EXT == d->events.poll_type); 688 689 d->events.act_req.resume = false; /* Reset flag before processing data */ 690 691 #ifdef MHD_SUPPORT_THREADS 692 if (d->events.data.extr.itc_data.is_active) 693 { 694 d->events.data.extr.itc_data.is_active = false; 695 /* Clear ITC here, before other data processing. 696 * Any external events will activate ITC again if additional data to 697 * process is added externally. Clearing ITC early ensures that new data 698 * (with additional ITC activation) will not be missed. */ 699 mhd_itc_clear (d->threading.itc); 700 } 701 #endif /* MHD_SUPPORT_THREADS */ 702 703 for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn); 704 NULL != c; 705 c = mhd_DLINKEDL_GET_NEXT (c,all_conn)) 706 { 707 bool has_err_state; 708 709 if (c->resuming) 710 start_resuming_connection (c, d); 711 else 712 { 713 if (is_conn_excluded_from_http_comm (c)) 714 { 715 mhd_assert (! c->in_proc_ready); 716 continue; 717 } 718 719 has_err_state = (0 != (((unsigned int) c->sk.ready) 720 & mhd_SOCKET_NET_STATE_ERROR_READY)); 721 722 mhd_conn_mark_ready_update3 (c, 723 has_err_state, 724 d); 725 } 726 } 727 728 return true; 729 } 730 731 732 /** 733 * Update all registrations of FDs for external monitoring. 
734 * @return #MHD_SC_OK on success, 735 * error code otherwise 736 */ 737 static MHD_FN_PAR_NONNULL_ (1) enum MHD_StatusCode 738 ext_events_update_registrations (struct MHD_Daemon *restrict d) 739 { 740 const bool rereg_all = d->events.data.extr.reg_all; 741 const bool edge_trigg = (mhd_WM_INT_EXTERNAL_EVENTS_EDGE == d->wmode_int); 742 bool daemon_fds_succeed; 743 struct MHD_Connection *c; 744 struct MHD_Connection *c_next; 745 746 mhd_assert (mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int)); 747 mhd_assert (mhd_POLL_TYPE_EXT == d->events.poll_type); 748 749 /* (Re-)register daemon's FDs */ 750 751 #ifdef MHD_SUPPORT_THREADS 752 if (rereg_all || 753 (NULL == d->events.data.extr.itc_data.app_cntx)) 754 { 755 /* (Re-)register ITC FD */ 756 d->events.data.extr.itc_data.app_cntx = 757 mhd_daemon_extr_event_reg (d, 758 mhd_itc_r_fd (d->threading.itc), 759 MHD_FD_STATE_RECV_EXCEPT, 760 d->events.data.extr.itc_data.app_cntx, 761 (struct MHD_EventUpdateContext *) 762 mhd_SOCKET_REL_MARKER_ITC); 763 } 764 daemon_fds_succeed = (NULL != d->events.data.extr.itc_data.app_cntx); 765 #else /* ! MHD_SUPPORT_THREADS */ 766 daemon_fds_succeed = true; 767 #endif /* ! 
MHD_SUPPORT_THREADS */ 768 769 if (daemon_fds_succeed) 770 { 771 if ((MHD_INVALID_SOCKET == d->net.listen.fd) && 772 (NULL != d->events.data.extr.listen_data.app_cntx)) 773 { 774 /* De-register the listen FD */ 775 d->events.data.extr.listen_data.app_cntx = 776 mhd_daemon_extr_event_reg (d, 777 d->net.listen.fd, 778 MHD_FD_STATE_NONE, 779 d->events.data.extr.listen_data.app_cntx, 780 (struct MHD_EventUpdateContext *) 781 mhd_SOCKET_REL_MARKER_LISTEN); 782 if (NULL != d->events.data.extr.listen_data.app_cntx) 783 mhd_log_extr_event_dereg_failed (d); 784 } 785 else if ((MHD_INVALID_SOCKET != d->net.listen.fd) && 786 (rereg_all || (NULL == d->events.data.extr.listen_data.app_cntx))) 787 { 788 /* (Re-)register listen FD */ 789 d->events.data.extr.listen_data.app_cntx = 790 mhd_daemon_extr_event_reg (d, 791 d->net.listen.fd, 792 MHD_FD_STATE_RECV_EXCEPT, 793 d->events.data.extr.listen_data.app_cntx, 794 (struct MHD_EventUpdateContext *) 795 mhd_SOCKET_REL_MARKER_LISTEN); 796 797 daemon_fds_succeed = (NULL != d->events.data.extr.listen_data.app_cntx); 798 } 799 } 800 801 if (! daemon_fds_succeed) 802 { 803 mhd_LOG_MSG (d, MHD_SC_EXT_EVENT_REG_DAEMON_FDS_FAILURE, \ 804 "Failed to register daemon FDs in the application " 805 "(external events) monitoring."); 806 return MHD_SC_EXT_EVENT_REG_DAEMON_FDS_FAILURE; 807 } 808 809 for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn); 810 NULL != c; 811 c = c_next) 812 { 813 enum MHD_FdState watch_for; 814 815 /* Get the next connection now, as the current connection could be removed 816 from the daemon. */ 817 c_next = mhd_DLINKEDL_GET_NEXT (c,all_conn); 818 819 mhd_assert (! 
c->resuming || c->suspended); 820 821 if (is_conn_excluded_from_http_comm (c)) 822 { 823 if (NULL != c->extr_event.app_cntx) 824 { 825 /* De-register the connection socket FD */ 826 c->extr_event.app_cntx = 827 mhd_daemon_extr_event_reg (d, 828 c->sk.fd, 829 MHD_FD_STATE_NONE, 830 c->extr_event.app_cntx, 831 (struct MHD_EventUpdateContext *) c); 832 if (NULL != c->extr_event.app_cntx) 833 mhd_log_extr_event_dereg_failed (d); 834 } 835 continue; 836 } 837 838 watch_for = 839 edge_trigg ? 840 MHD_FD_STATE_RECV_SEND_EXCEPT : 841 (enum MHD_FdState) (MHD_FD_STATE_EXCEPT 842 | (((unsigned int) c->event_loop_info) 843 & (MHD_EVENT_LOOP_INFO_RECV 844 | MHD_EVENT_LOOP_INFO_SEND))); 845 846 mhd_assert ((! edge_trigg) || \ 847 (MHD_FD_STATE_RECV_SEND_EXCEPT == c->extr_event.reg_for) || \ 848 (NULL == c->extr_event.app_cntx)); 849 850 if ((NULL == c->extr_event.app_cntx) || 851 rereg_all || 852 (! edge_trigg && (watch_for != c->extr_event.reg_for))) 853 { 854 /* (Re-)register the connection socket FD */ 855 c->extr_event.app_cntx = 856 mhd_daemon_extr_event_reg (d, 857 c->sk.fd, 858 watch_for, 859 c->extr_event.app_cntx, 860 (struct MHD_EventUpdateContext *) c); 861 if (NULL == c->extr_event.app_cntx) 862 { 863 mhd_conn_start_closing_ext_event_failed (c); 864 mhd_conn_pre_clean (c); 865 mhd_conn_remove_from_daemon (c); 866 mhd_conn_close_final (c); 867 } 868 c->extr_event.reg_for = watch_for; 869 } 870 } 871 872 return MHD_SC_OK; 873 } 874 875 876 #ifdef MHD_SUPPORT_SELECT 877 878 /** 879 * Add socket to the fd_set 880 * @param fd the socket to add 881 * @param fs the pointer to fd_set 882 * @param max the pointer to variable to be updated with maximum FD value (or 883 * set to non-zero in case of WinSock) 884 * @param d the daemon object 885 */ 886 mhd_static_inline MHD_FN_PAR_NONNULL_ALL_ 887 MHD_FN_PAR_INOUT_ (2) 888 MHD_FN_PAR_INOUT_ (3) void 889 fd_set_wrap (MHD_Socket fd, 890 fd_set *restrict fs, 891 int *restrict max, 892 struct MHD_Daemon *restrict d) 893 { 894 
mhd_assert (mhd_FD_FITS_DAEMON (d, fd)); /* Must be checked for every FD before 895 it is added */ 896 mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type); 897 (void) d; /* Unused with non-debug builds */ 898 #if defined(MHD_SOCKETS_KIND_POSIX) 899 FD_SET (fd, fs); 900 if (*max < fd) 901 *max = fd; 902 #elif defined(MHD_SOCKETS_KIND_WINSOCK) 903 /* Use custom set function to take advantage of know uniqueness of 904 * used sockets (to skip useless (for this function) check for duplicated 905 * sockets implemented in system's macro). */ 906 mhd_assert (fs->fd_count < FD_SETSIZE - 1); /* Daemon limits set to always fit FD_SETSIZE */ 907 mhd_assert (! FD_ISSET (fd, fs)); /* All sockets must be unique */ 908 fs->fd_array[fs->fd_count++] = fd; 909 *max = 1; 910 #else 911 #error Unknown sockets type 912 #endif 913 } 914 915 916 /** 917 * Set daemon's FD_SETs to monitor all daemon's sockets 918 * @param d the daemon to use 919 * @param listen_only set to 'true' if connections's sockets should NOT 920 * be monitored 921 * @return with POSIX sockets: the maximum number of the socket used in 922 * the FD_SETs; 923 * with winsock: non-zero if at least one socket has been added to 924 * the FD_SETs, 925 * zero if no sockets in the FD_SETs 926 */ 927 static MHD_FN_PAR_NONNULL_ (1) int 928 select_update_fdsets (struct MHD_Daemon *restrict d, 929 bool listen_only) 930 { 931 struct MHD_Connection *c; 932 fd_set *const restrict rfds = d->events.data.select.rfds; 933 fd_set *const restrict wfds = d->events.data.select.wfds; 934 fd_set *const restrict efds = d->events.data.select.efds; 935 int ret; 936 937 mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type); 938 mhd_assert (NULL != rfds); 939 mhd_assert (NULL != wfds); 940 mhd_assert (NULL != efds); 941 FD_ZERO (rfds); 942 FD_ZERO (wfds); 943 FD_ZERO (efds); 944 945 ret = 0; 946 #ifdef MHD_SUPPORT_THREADS 947 mhd_assert (mhd_ITC_IS_VALID (d->threading.itc)); 948 fd_set_wrap (mhd_itc_r_fd (d->threading.itc), 949 rfds, 950 
&ret, 951 d); 952 fd_set_wrap (mhd_itc_r_fd (d->threading.itc), 953 efds, 954 &ret, 955 d); 956 mhd_dbg_print_fd_mon_req ("ITC", \ 957 mhd_itc_r_fd (d->threading.itc), \ 958 true, \ 959 false, \ 960 true); 961 #endif 962 if ((MHD_INVALID_SOCKET != d->net.listen.fd) 963 && ! d->conns.block_new) 964 { 965 mhd_assert (! d->net.listen.is_broken); 966 967 fd_set_wrap (d->net.listen.fd, 968 rfds, 969 &ret, 970 d); 971 fd_set_wrap (d->net.listen.fd, 972 efds, 973 &ret, 974 d); 975 mhd_dbg_print_fd_mon_req ("lstn", \ 976 d->net.listen.fd, \ 977 true, \ 978 false, \ 979 true); 980 } 981 if (listen_only) 982 return ret; 983 984 for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn); NULL != c; 985 c = mhd_DLINKEDL_GET_NEXT (c,all_conn)) 986 { 987 mhd_assert (mhd_HTTP_STAGE_CLOSED != c->stage); 988 if (is_conn_excluded_from_http_comm (c)) 989 continue; 990 991 if (0 != (c->event_loop_info & MHD_EVENT_LOOP_INFO_RECV)) 992 fd_set_wrap (c->sk.fd, 993 rfds, 994 &ret, 995 d); 996 if (0 != (c->event_loop_info & MHD_EVENT_LOOP_INFO_SEND)) 997 fd_set_wrap (c->sk.fd, 998 wfds, 999 &ret, 1000 d); 1001 fd_set_wrap (c->sk.fd, 1002 efds, 1003 &ret, 1004 d); 1005 mhd_dbg_print_fd_mon_req ("conn", \ 1006 c->sk.fd, \ 1007 FD_ISSET (c->sk.fd, rfds), \ 1008 FD_ISSET (c->sk.fd, wfds), \ 1009 true); 1010 } 1011 1012 return ret; 1013 } 1014 1015 1016 static MHD_FN_PAR_NONNULL_ (1) bool 1017 select_update_statuses_from_fdsets_and_resume_conn (struct MHD_Daemon *d, 1018 int num_events) 1019 { 1020 struct MHD_Connection *c; 1021 fd_set *const restrict rfds = d->events.data.select.rfds; 1022 fd_set *const restrict wfds = d->events.data.select.wfds; 1023 fd_set *const restrict efds = d->events.data.select.efds; 1024 bool resuming_conn; 1025 1026 mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type); 1027 mhd_assert (0 <= num_events); 1028 mhd_assert (((unsigned int) num_events) <= d->dbg.num_events_elements); 1029 1030 resuming_conn = d->events.act_req.resume; 1031 if (resuming_conn) 1032 { 1033 
mhd_assert (! mhd_D_TYPE_IS_LISTEN_ONLY (d->threading.d_type)); 1034 mhd_assert (! mhd_D_HAS_THR_PER_CONN (d)); 1035 num_events = (int) -1; /* Force process all connections */ 1036 d->events.act_req.resume = false; 1037 } 1038 1039 #ifndef MHD_FAVOR_SMALL_CODE 1040 if (0 == num_events) 1041 return true; 1042 #endif /* MHD_FAVOR_SMALL_CODE */ 1043 1044 #ifdef MHD_SUPPORT_THREADS 1045 mhd_assert (mhd_ITC_IS_VALID (d->threading.itc)); 1046 dbg_print_fd_state_update ("ITC", \ 1047 mhd_itc_r_fd (d->threading.itc), \ 1048 FD_ISSET (mhd_itc_r_fd (d->threading.itc), rfds), \ 1049 FD_ISSET (mhd_itc_r_fd (d->threading.itc), wfds), \ 1050 FD_ISSET (mhd_itc_r_fd (d->threading.itc), efds)); 1051 if (FD_ISSET (mhd_itc_r_fd (d->threading.itc), efds)) 1052 { 1053 log_itc_broken (d); 1054 /* ITC is broken, need to stop the daemon thread now as otherwise 1055 application will not be able to stop the thread. */ 1056 return false; 1057 } 1058 if (FD_ISSET (mhd_itc_r_fd (d->threading.itc), rfds)) 1059 { 1060 --num_events; 1061 /* Clear ITC here, before other data processing. 1062 * Any external events will activate ITC again if additional data to 1063 * process is added externally. Clearing ITC early ensures that new data 1064 * (with additional ITC activation) will not be missed. */ 1065 mhd_itc_clear (d->threading.itc); 1066 } 1067 1068 #ifndef MHD_FAVOR_SMALL_CODE 1069 if (0 == num_events) 1070 return true; 1071 #endif /* MHD_FAVOR_SMALL_CODE */ 1072 #endif /* MHD_SUPPORT_THREADS */ 1073 1074 if (MHD_INVALID_SOCKET != d->net.listen.fd) 1075 { 1076 mhd_assert (! d->net.listen.is_broken); 1077 dbg_print_fd_state_update ("lstn", \ 1078 d->net.listen.fd, \ 1079 FD_ISSET (d->net.listen.fd, rfds), \ 1080 FD_ISSET (d->net.listen.fd, wfds), \ 1081 FD_ISSET (d->net.listen.fd, efds)); 1082 if (FD_ISSET (d->net.listen.fd, efds)) 1083 { 1084 --num_events; 1085 log_listen_broken (d); 1086 /* Close the listening socket unless the master daemon should close it */ 1087 if (! 
mhd_D_HAS_MASTER (d)) 1088 mhd_socket_close (d->net.listen.fd); 1089 1090 d->events.accept_pending = false; 1091 d->net.listen.is_broken = true; 1092 /* Stop monitoring socket to avoid spinning with busy-waiting */ 1093 d->net.listen.fd = MHD_INVALID_SOCKET; 1094 } 1095 else 1096 { 1097 d->events.accept_pending = FD_ISSET (d->net.listen.fd, rfds); 1098 if (d->events.accept_pending) 1099 --num_events; 1100 } 1101 } 1102 1103 mhd_assert ((0 == num_events) || \ 1104 (! mhd_D_TYPE_IS_LISTEN_ONLY (d->threading.d_type))); 1105 1106 #ifdef MHD_FAVOR_SMALL_CODE 1107 (void) num_events; 1108 num_events = 1; /* Use static value to minimise the binary size of the next loop */ 1109 #endif /* ! MHD_FAVOR_SMALL_CODE */ 1110 1111 for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns), all_conn); 1112 (NULL != c) && (0 != num_events); 1113 c = mhd_DLINKEDL_GET_NEXT (c, all_conn)) 1114 { 1115 if (c->resuming) 1116 start_resuming_connection (c, d); 1117 else 1118 { 1119 MHD_Socket sk; 1120 bool recv_ready; 1121 bool send_ready; 1122 bool err_state; 1123 1124 if (is_conn_excluded_from_http_comm (c)) 1125 continue; 1126 1127 sk = c->sk.fd; 1128 recv_ready = FD_ISSET (sk, rfds); 1129 send_ready = FD_ISSET (sk, wfds); 1130 err_state = FD_ISSET (sk, efds); 1131 1132 update_conn_net_status (d, 1133 c, 1134 recv_ready, 1135 send_ready, 1136 err_state); 1137 #ifndef MHD_FAVOR_SMALL_CODE 1138 if (recv_ready || send_ready || err_state) 1139 --num_events; 1140 #endif /* MHD_FAVOR_SMALL_CODE */ 1141 } 1142 } 1143 1144 #ifndef MHD_FAVOR_SMALL_CODE 1145 // TODO: recheck functionality with HTTP/2 1146 // mhd_assert ((0 == num_events) || resuming_conn); 1147 #endif /* MHD_FAVOR_SMALL_CODE */ 1148 return true; 1149 } 1150 1151 1152 /** 1153 * Get pointer to struct timeval for select() for polling daemon's sockets 1154 * @param d the daemon to use 1155 * @param[out] tmvl to pointer to the allocated struct timeval 1156 * @return the @a tmvl pointer (with maximum wait value set) 1157 * or NULL if select may wait 
indefinitely 1158 */ 1159 mhd_static_inline MHD_FN_PAR_NONNULL_ALL_ 1160 MHD_FN_PAR_OUT_ (2) struct timeval * 1161 get_timeval_for_select (const struct MHD_Daemon *restrict d, 1162 struct timeval *tmvl) 1163 { 1164 const uint_fast64_t max_wait = mhd_daemon_get_wait_max (d); 1165 #ifdef HAVE_TIME_T 1166 time_t max_wait_secs = (time_t) (max_wait / 1000u); 1167 #else /* ! HAVE_TIME_T */ 1168 long max_wait_secs = (long) (max_wait / 1000u); 1169 #endif /* ! HAVE_TIME_T */ 1170 #ifdef HAVE_SUSECONDS_T 1171 suseconds_t max_wait_usecs = (suseconds_t) ((max_wait % 1000u) * 1000u); 1172 #else /* ! HAVE_SUSECONDS_T */ 1173 long max_wait_usecs = (long) ((max_wait % 1000u) * 1000u); 1174 #endif /* ! HAVE_SUSECONDS_T */ 1175 1176 if (MHD_WAIT_INDEFINITELY <= max_wait) 1177 return NULL; 1178 1179 if (0u == max_wait) 1180 { 1181 tmvl->tv_sec = 0; 1182 tmvl->tv_usec = 0; 1183 1184 return tmvl; 1185 } 1186 1187 if (mhd_COND_ALMOST_NEVER ((max_wait / 1000u != 1188 (uint_fast64_t) max_wait_secs) || 1189 (max_wait_secs <= 0))) 1190 { 1191 /* Do not bother figuring out the real maximum 'time_t' value. 1192 '0x7FFFFFFF' is large enough to be already unrealistic and should 1193 fit most of signed or unsigned time_t types. 
*/ 1194 tmvl->tv_sec = 0x7FFFFFFF; 1195 tmvl->tv_usec = 0; 1196 1197 return tmvl; 1198 } 1199 1200 tmvl->tv_sec = max_wait_secs; 1201 tmvl->tv_usec = max_wait_usecs; 1202 1203 return tmvl; 1204 } 1205 1206 1207 /** 1208 * Update states of all connections, check for connection pending 1209 * to be accept()'ed, check for the events on ITC; resume connections 1210 * @param listen_only set to 'true' if connections's sockets should NOT 1211 * be monitored 1212 * @return 'true' if processed successfully, 1213 * 'false' is unrecoverable error occurs and the daemon must be 1214 * closed 1215 */ 1216 static MHD_FN_PAR_NONNULL_ (1) bool 1217 get_all_net_updates_by_select_and_resume_conn (struct MHD_Daemon *restrict d, 1218 bool listen_only) 1219 { 1220 int max_socket; 1221 struct timeval tmvl_value; 1222 struct timeval *tmvl_ptr; 1223 int num_events; 1224 mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type); 1225 1226 max_socket = select_update_fdsets (d, 1227 listen_only); 1228 1229 tmvl_ptr = get_timeval_for_select (d, 1230 &tmvl_value); 1231 1232 #ifdef MHD_SOCKETS_KIND_WINSOCK 1233 if (0 == max_socket) 1234 { 1235 Sleep (tmvl_ptr ? 
tmvl_ptr->tv_sec : 600); 1236 return true; 1237 } 1238 #endif /* MHD_SOCKETS_KIND_WINSOCK */ 1239 1240 #ifdef mhd_DEBUG_POLLING_FDS 1241 if (NULL != tmvl_ptr) 1242 fprintf (stderr, 1243 "### (Starting) select(%d, rfds, wfds, efds, [%llu, %llu])...\n", 1244 max_socket + 1, 1245 (unsigned long long) tmvl_ptr->tv_sec, 1246 (unsigned long long) tmvl_ptr->tv_usec); 1247 else 1248 fprintf (stderr, 1249 "### (Starting) select(%d, rfds, wfds, efds, [NULL])...\n", 1250 max_socket + 1); 1251 #endif /* mhd_DEBUG_POLLING_FDS */ 1252 num_events = select (max_socket + 1, 1253 d->events.data.select.rfds, 1254 d->events.data.select.wfds, 1255 d->events.data.select.efds, 1256 tmvl_ptr); 1257 #ifdef mhd_DEBUG_POLLING_FDS 1258 if (NULL != tmvl_ptr) 1259 fprintf (stderr, 1260 "### (Finished) select(%d, rfds, wfds, efds, ->[%llu, %llu]) -> " 1261 "%d\n", 1262 max_socket + 1, 1263 (unsigned long long) tmvl_ptr->tv_sec, 1264 (unsigned long long) tmvl_ptr->tv_usec, 1265 num_events); 1266 else 1267 fprintf (stderr, 1268 "### (Finished) select(%d, rfds, wfds, efds, [NULL]) -> " 1269 "%d\n", 1270 max_socket + 1, 1271 num_events); 1272 #endif /* mhd_DEBUG_POLLING_FDS */ 1273 1274 if (0 > num_events) 1275 { 1276 int err; 1277 bool is_hard_error; 1278 bool is_ignored_error; 1279 is_hard_error = false; 1280 is_ignored_error = false; 1281 #if defined(MHD_SOCKETS_KIND_POSIX) 1282 err = errno; 1283 if (0 != err) 1284 { 1285 is_hard_error = 1286 ((mhd_EBADF_OR_ZERO == err) || (mhd_EINVAL_OR_ZERO == err)); 1287 is_ignored_error = (mhd_EINTR_OR_ZERO == err); 1288 } 1289 #elif defined(MHD_SOCKETS_KIND_WINSOCK) 1290 err = WSAGetLastError (); 1291 is_hard_error = 1292 ((WSAENETDOWN == err) || (WSAEFAULT == err) || (WSAEINVAL == err) || 1293 (WSANOTINITIALISED == err)); 1294 #endif 1295 if (! 
is_ignored_error)
    {
      if (is_hard_error)
      {
        mhd_LOG_MSG (d, MHD_SC_SELECT_HARD_ERROR, \
                     "The select() encountered unrecoverable error.");
        return false;
      }
      mhd_LOG_MSG (d, MHD_SC_SELECT_SOFT_ERROR, \
                   "The select() encountered error.");
      return true;
    }
  }
  /* NOTE(review): on an ignored error (EINTR) execution falls through here
     with a NEGATIVE 'num_events'; the poll-based variant returns early
     instead — confirm the fdsets-processing function tolerates this. */

  return select_update_statuses_from_fdsets_and_resume_conn (d, num_events);
}


#endif /* MHD_SUPPORT_SELECT */


#ifdef MHD_SUPPORT_POLL

/* Fill the daemon's pollfd array (ITC, listen socket, then connections);
   returns the number of entries to be passed to poll(). */
static MHD_FN_PAR_NONNULL_ (1) unsigned int
poll_update_fds (struct MHD_Daemon *restrict d,
                 bool listen_only)
{
  unsigned int i_s;
  unsigned int i_c;
  struct MHD_Connection *restrict c;
#ifndef NDEBUG
  unsigned int num_skipped = 0;
#endif /* ! NDEBUG */

  mhd_assert (mhd_POLL_TYPE_POLL == d->events.poll_type);

  i_s = 0;
#ifdef MHD_SUPPORT_THREADS
  /* Slot 0 is permanently the inter-thread communication channel */
  mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
  mhd_assert (d->events.data.poll.fds[i_s].fd == \
              mhd_itc_r_fd (d->threading.itc));
  mhd_assert (mhd_SOCKET_REL_MARKER_ITC == \
              d->events.data.poll.rel[i_s].fd_id);
#ifndef HAVE_POLL_CLOBBERS_EVENTS
  mhd_assert (POLLIN == d->events.data.poll.fds[i_s].events);
#else  /* HAVE_POLL_CLOBBERS_EVENTS */
  d->events.data.poll.fds[i_s].events = POLLIN;
#endif /* HAVE_POLL_CLOBBERS_EVENTS */
  mhd_dbg_print_fd_mon_req ("ITC", \
                            mhd_itc_r_fd (d->threading.itc), \
                            true, \
                            false, \
                            false);
  ++i_s;
#endif
  if (MHD_INVALID_SOCKET != d->net.listen.fd)
  {
    mhd_assert (! d->net.listen.is_broken);
    mhd_assert (d->events.data.poll.fds[i_s].fd == d->net.listen.fd);
    mhd_assert (mhd_SOCKET_REL_MARKER_LISTEN == \
                d->events.data.poll.rel[i_s].fd_id);
#ifndef HAVE_POLL_CLOBBERS_EVENTS
    mhd_assert ((POLLIN == d->events.data.poll.fds[i_s].events) ||
                (0 == d->events.data.poll.fds[i_s].events));
#endif /* ! HAVE_POLL_CLOBBERS_EVENTS */
    /* Do not monitor the listen socket while new connections are blocked */
    d->events.data.poll.fds[i_s].events = d->conns.block_new ? 0 : POLLIN;
    mhd_dbg_print_fd_mon_req ("lstn", \
                              d->net.listen.fd, \
                              POLLIN == d->events.data.poll.fds[i_s].events, \
                              false, \
                              false);
    ++i_s;
  }
  if (listen_only)
    return i_s;

  i_c = i_s;
  for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn); NULL != c;
       c = mhd_DLINKEDL_GET_NEXT (c,all_conn))
  {
    unsigned short events; /* 'unsigned' for correct bits manipulations */

    if (is_conn_excluded_from_http_comm (c))
    {
#ifndef NDEBUG
      ++num_skipped;
#endif /* ! NDEBUG */
      continue;
    }

    mhd_assert ((i_c - i_s) < d->conns.cfg.count_limit);
    mhd_assert (i_c < d->dbg.num_events_elements);
    mhd_assert (mhd_HTTP_STAGE_CLOSED != c->stage);

    d->events.data.poll.fds[i_c].fd = c->sk.fd;
    d->events.data.poll.rel[i_c].connection = c;
    events = 0;
    if (0 != (c->event_loop_info & MHD_EVENT_LOOP_INFO_RECV))
      events |= MHD_POLL_IN;
    if (0 != (c->event_loop_info & MHD_EVENT_LOOP_INFO_SEND))
      events |= MHD_POLL_OUT;

    d->events.data.poll.fds[i_c].events = (short) events;
    mhd_dbg_print_fd_mon_req ("conn", \
                              c->sk.fd, \
                              MHD_POLL_IN == (MHD_POLL_IN & events), \
                              MHD_POLL_OUT == (MHD_POLL_OUT & events), \
                              false);
    ++i_c;
  }
  mhd_assert ((d->conns.count - num_skipped) == (i_c - i_s));
  mhd_assert (i_c <= d->dbg.num_events_elements);
  return i_c;
}


/* Translate poll() 'revents' into daemon/connection state updates */
static MHD_FN_PAR_NONNULL_ (1) bool
poll_update_statuses_from_fds (struct MHD_Daemon *restrict d,
                               int num_events)
{
  unsigned int i_s;
  unsigned int i_c;
  mhd_assert (mhd_POLL_TYPE_POLL == d->events.poll_type);
  mhd_assert (0 <= num_events);
  mhd_assert (((unsigned int) num_events) <= d->dbg.num_events_elements);

  if (0 == num_events)
    return true;

  i_s = 0;
#ifdef
MHD_SUPPORT_THREADS
  mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
  mhd_assert (d->events.data.poll.fds[i_s].fd == \
              mhd_itc_r_fd (d->threading.itc));
  mhd_assert (mhd_SOCKET_REL_MARKER_ITC == \
              d->events.data.poll.rel[i_s].fd_id);
#ifndef HAVE_POLL_CLOBBERS_EVENTS
  mhd_assert (POLLIN == d->events.data.poll.fds[i_s].events);
#endif /* ! HAVE_POLL_CLOBBERS_EVENTS */
  dbg_print_fd_state_update ( \
    "ITC", \
    d->events.data.poll.fds[i_s].fd, \
    0 != (d->events.data.poll.fds[i_s].revents & (MHD_POLL_IN | POLLIN)), \
    0 != (d->events.data.poll.fds[i_s].revents & (MHD_POLL_OUT | POLLOUT)), \
    0 != (d->events.data.poll.fds[i_s].revents & (POLLERR | POLLNVAL)));

  if (0 != (d->events.data.poll.fds[i_s].revents & (POLLERR | POLLNVAL)))
  {
    log_itc_broken (d);
    /* ITC is broken, need to stop the daemon thread now as otherwise
       application will not be able to stop the thread. */
    return false;
  }
  if (0 != (d->events.data.poll.fds[i_s].revents & (MHD_POLL_IN | POLLIN)))
  {
    --num_events;
    /* Clear ITC here, before other data processing.
     * Any external events will activate ITC again if additional data to
     * process is added externally. Clearing ITC early ensures that new data
     * (with additional ITC activation) will not be missed. */
    mhd_itc_clear (d->threading.itc);
  }
  ++i_s;

  /* Early exit: the ITC event may have been the only reported one */
  if (0 == num_events)
    return true;
#endif /* MHD_SUPPORT_THREADS */

  if (MHD_INVALID_SOCKET != d->net.listen.fd)
  {
    const short revents = d->events.data.poll.fds[i_s].revents;

    mhd_assert (! d->net.listen.is_broken);
    mhd_assert (d->events.data.poll.fds[i_s].fd == d->net.listen.fd);
    mhd_assert (mhd_SOCKET_REL_MARKER_LISTEN == \
                d->events.data.poll.rel[i_s].fd_id);
#ifndef HAVE_POLL_CLOBBERS_EVENTS
    mhd_assert ((POLLIN == d->events.data.poll.fds[i_s].events) ||
                (0 == d->events.data.poll.fds[i_s].events));
#endif /* ! HAVE_POLL_CLOBBERS_EVENTS */
    dbg_print_fd_state_update ("lstn", \
                               d->events.data.poll.fds[i_s].fd, \
                               0 != (revents & (MHD_POLL_IN | POLLIN)), \
                               0 != (revents & (MHD_POLL_OUT | POLLOUT)), \
                               0 != (revents & (POLLERR | POLLNVAL | POLLHUP)));
    if (0 != (revents & (POLLERR | POLLNVAL | POLLHUP)))
    {
      --num_events;
      log_listen_broken (d);
      /* Close the listening socket unless the master daemon should close it */
      if (! mhd_D_HAS_MASTER (d))
        mhd_socket_close (d->net.listen.fd);

      d->events.accept_pending = false;
      d->net.listen.is_broken = true;
      /* Stop monitoring socket to avoid spinning with busy-waiting */
      d->net.listen.fd = MHD_INVALID_SOCKET;
    }
    else
    {
      const bool has_new_conns = (0 != (revents & (MHD_POLL_IN | POLLIN)));
      if (has_new_conns)
      {
        --num_events;
        d->events.accept_pending = true;
      }
      else
      {
        /* Check whether the listen socket was monitored for incoming
           connections */
        if (0 != (d->events.data.poll.fds[i_s].events & POLLIN))
          d->events.accept_pending = false;
      }
    }
    ++i_s;
  }

  mhd_assert ((0 == num_events) || \
              (! mhd_D_TYPE_IS_LISTEN_ONLY (d->threading.d_type)));

  /* Connection slots start right after the ITC/listen slots */
  for (i_c = i_s; (i_c < i_s + d->conns.count) && (0 < num_events); ++i_c)
  {
    struct MHD_Connection *restrict c;
    bool recv_ready;
    bool send_ready;
    bool err_state;
    short revents;
    mhd_assert (i_c < d->dbg.num_events_elements);
    mhd_assert (mhd_SOCKET_REL_MARKER_EMPTY != \
                d->events.data.poll.rel[i_c].fd_id);
    mhd_assert (mhd_SOCKET_REL_MARKER_ITC != \
                d->events.data.poll.rel[i_c].fd_id);
    mhd_assert (mhd_SOCKET_REL_MARKER_LISTEN != \
                d->events.data.poll.rel[i_c].fd_id);

    c = d->events.data.poll.rel[i_c].connection;
    mhd_assert (! is_conn_excluded_from_http_comm (c));
    mhd_assert (c->sk.fd == d->events.data.poll.fds[i_c].fd);
    revents = d->events.data.poll.fds[i_c].revents;
    recv_ready = (0 != (revents & (MHD_POLL_IN | POLLIN)));
    send_ready = (0 != (revents & (MHD_POLL_OUT | POLLOUT)));
#ifndef MHD_POLLHUP_ON_REM_SHUT_WR
    err_state = (0 != (revents & (POLLHUP | POLLERR | POLLNVAL)));
#else
    err_state = (0 != (revents & (POLLERR | POLLNVAL)));
    if (0 != (revents & POLLHUP))
    { /* This can be a disconnect OR remote side set SHUT_WR */
      recv_ready = true; /* Check the socket by reading */
      if (0 == (c->event_loop_info & MHD_EVENT_LOOP_INFO_RECV))
        err_state = true; /* The socket will not be checked by reading, the only way to avoid spinning */
    }
#endif
    if (0 != (revents & (MHD_POLLPRI | MHD_POLLRDBAND)))
    { /* Statuses were not requested, but returned */
      if (! recv_ready ||
          (0 == (c->event_loop_info & MHD_EVENT_LOOP_INFO_RECV)))
        err_state = true; /* The socket will not be read, the only way to avoid spinning */
    }
    if (0 != (revents & MHD_POLLWRBAND))
    { /* Status was not requested, but returned */
      if (!
send_ready ||
          (0 == (c->event_loop_info & MHD_EVENT_LOOP_INFO_SEND)))
        err_state = true; /* The socket will not be written, the only way to avoid spinning */
    }

    update_conn_net_status (d, c, recv_ready, send_ready, err_state);
  }
  mhd_assert (d->conns.count >= (i_c - i_s));
  mhd_assert (i_c <= d->dbg.num_events_elements);
  return true;
}


/* Run one poll() round and apply the results to daemon/connection states */
static MHD_FN_PAR_NONNULL_ (1) bool
get_all_net_updates_by_poll (struct MHD_Daemon *restrict d,
                             bool listen_only)
{
#ifdef mhd_DEBUG_POLLING_FDS
# ifdef MHD_SOCKETS_KIND_POSIX
  static const char poll_fn_name[] = "poll";
# else  /* MHD_SOCKETS_KIND_WINSOCK */
  static const char poll_fn_name[] = "WSAPoll";
# endif /* MHD_SOCKETS_KIND_WINSOCK */
#endif /* mhd_DEBUG_POLLING_FDS */
  unsigned int num_fds;
  int max_wait;
  int num_events;

  mhd_assert (mhd_POLL_TYPE_POLL == d->events.poll_type);

  num_fds = poll_update_fds (d, listen_only);

  // TODO: handle empty list situation
  max_wait = get_max_wait (d);

#ifdef mhd_DEBUG_POLLING_FDS
  fprintf (stderr,
           "### (Starting) %s(fds, %u, %d)...\n",
           poll_fn_name,
           num_fds,
           max_wait);
#endif /* mhd_DEBUG_POLLING_FDS */
  num_events = mhd_poll (d->events.data.poll.fds,
                         num_fds,
                         max_wait); // TODO: use correct timeout value
#ifdef mhd_DEBUG_POLLING_FDS
  fprintf (stderr,
           "### (Finished) %s(fds, %u, %d) -> %d\n",
           poll_fn_name,
           num_fds,
           max_wait,
           num_events);
#endif /* mhd_DEBUG_POLLING_FDS */
  if (0 > num_events)
  {
    int err;
    bool is_hard_error;
    bool is_ignored_error;
    is_hard_error = false;
    is_ignored_error = false;
#if defined(MHD_SOCKETS_KIND_POSIX)
    err = errno;
    if (0 != err)
    {
      is_hard_error =
        ((mhd_EFAULT_OR_ZERO == err) || (mhd_EINVAL_OR_ZERO == err));
      is_ignored_error = (mhd_EINTR_OR_ZERO == err);
    }
#elif defined(MHD_SOCKETS_KIND_WINSOCK)
    err = WSAGetLastError ();
    is_hard_error =
      ((WSAENETDOWN == err) || (WSAEFAULT == err) || (WSAEINVAL == err));
#endif
    if (! is_ignored_error)
    {
      if (is_hard_error)
      {
        mhd_LOG_MSG (d, MHD_SC_POLL_HARD_ERROR, \
                     "The poll() encountered unrecoverable error.");
        return false;
      }
      mhd_LOG_MSG (d, MHD_SC_POLL_SOFT_ERROR, \
                   "The poll() encountered error.");
    }
    /* Soft or ignored error: skip status processing for this round */
    return true;
  }

  return poll_update_statuses_from_fds (d, num_events);
}


#endif /* MHD_SUPPORT_POLL */

#ifdef MHD_SUPPORT_EPOLL

/**
 * Map events provided by epoll to connection states, ITC and
 * listen socket states
 */
static MHD_FN_PAR_NONNULL_ (1) bool
update_statuses_from_eevents (struct MHD_Daemon *restrict d,
                              unsigned int num_events)
{
  unsigned int i;
  struct epoll_event *const restrict events =
    d->events.data.epoll.events;
  for (i = 0; num_events > i; ++i)
  {
    struct epoll_event *const e = events + i;
#ifdef MHD_SUPPORT_THREADS
    if (((uint64_t) mhd_SOCKET_REL_MARKER_ITC) == e->data.u64) /* uint64_t is in the system header */
    {
      mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
      dbg_print_fd_state_update ( \
        "ITC", \
        mhd_itc_r_fd (d->threading.itc), \
        0 != (e->events & EPOLLIN), \
        0 != (e->events & EPOLLOUT), \
        0 != (e->events & (EPOLLPRI | EPOLLERR | EPOLLHUP)));

      if (0 != (e->events & (EPOLLPRI | EPOLLERR | EPOLLHUP)))
      {
        log_itc_broken (d);
        /* ITC is broken, need to stop the daemon thread now as otherwise
           application will not be able to stop the thread. */
        return false;
      }
      if (0 != (e->events & EPOLLIN))
      {
        /* Clear ITC here, before other data processing.
         * Any external events will activate ITC again if additional data to
         * process is added externally. Clearing ITC early ensures that new data
         * (with additional ITC activation) will not be missed. */
        mhd_itc_clear (d->threading.itc);
      }
    }
    else
#endif /* MHD_SUPPORT_THREADS */
    if (((uint64_t) mhd_SOCKET_REL_MARKER_LISTEN) == e->data.u64) /* uint64_t is in the system header */
    {
      mhd_assert (MHD_INVALID_SOCKET != d->net.listen.fd);
      dbg_print_fd_state_update ( \
        "lstn", \
        d->net.listen.fd, \
        0 != (e->events & EPOLLIN), \
        0 != (e->events & EPOLLOUT), \
        0 != (e->events & (EPOLLPRI | EPOLLERR | EPOLLHUP)));
      if (0 != (e->events & (EPOLLPRI | EPOLLERR | EPOLLHUP)))
      {
        log_listen_broken (d);

        /* Close the listening socket unless the master daemon should close it */
        if (! mhd_D_HAS_MASTER (d))
          mhd_socket_close (d->net.listen.fd);
        else
        {
          /* Ignore possible error as the socket could be already removed
             from the epoll monitoring by closing the socket */
          (void) epoll_ctl (d->events.data.epoll.e_fd,
                            EPOLL_CTL_DEL,
                            d->net.listen.fd,
                            NULL);
        }

        d->events.accept_pending = false;
        d->net.listen.is_broken = true;
        d->net.listen.fd = MHD_INVALID_SOCKET;
      }
      else
        d->events.accept_pending = (0 != (e->events & EPOLLIN));
    }
    else
    {
      bool recv_ready;
      bool send_ready;
      bool err_state;
      struct MHD_Connection *const restrict c =
        (struct MHD_Connection *) e->data.ptr;
      mhd_assert (! is_conn_excluded_from_http_comm (c));
      /* Error/hangup also implies readiness so the error gets detected
         by the following recv()/send() attempt */
      recv_ready = (0 != (e->events & (EPOLLIN | EPOLLERR | EPOLLHUP)));
      send_ready = (0 != (e->events & (EPOLLOUT | EPOLLERR | EPOLLHUP)));
      err_state = (0 != (e->events & (EPOLLERR | EPOLLHUP)));

      update_conn_net_status (d, c, recv_ready, send_ready, err_state);
    }
  }
  return true;
}


/**
 * Update states of all connections, check for connection pending
 * to be accept()'ed, check for the events on ITC.
1748 */ 1749 static MHD_FN_PAR_NONNULL_ (1) bool 1750 get_all_net_updates_by_epoll (struct MHD_Daemon *restrict d) 1751 { 1752 int max_events; 1753 int num_events; 1754 unsigned int events_processed; 1755 int max_wait; 1756 mhd_assert (mhd_POLL_TYPE_EPOLL == d->events.poll_type); 1757 mhd_assert (0 < ((int) d->events.data.epoll.num_elements)); 1758 mhd_assert (d->events.data.epoll.num_elements == \ 1759 (size_t) ((int) d->events.data.epoll.num_elements)); 1760 mhd_assert (0 != d->events.data.epoll.num_elements); 1761 mhd_assert (0 != d->conns.cfg.count_limit); 1762 mhd_assert (d->events.data.epoll.num_elements == d->dbg.num_events_elements); 1763 1764 // TODO: add listen socket enable/disable 1765 1766 /* Minimise amount of data passed from userspace to kernel and back */ 1767 max_events = (int) d->conns.cfg.count_limit; 1768 #ifdef MHD_SUPPORT_THREADS 1769 ++max_events; 1770 #endif /* MHD_SUPPORT_THREADS */ 1771 if (MHD_INVALID_SOCKET != d->net.listen.fd) 1772 ++max_events; 1773 /* Make sure that one extra slot used to clearly detect that all events 1774 * were gotten. 
*/ 1775 ++max_events; 1776 if ((0 > max_events) || 1777 (max_events > (int) d->events.data.epoll.num_elements)) 1778 max_events = (int) d->events.data.epoll.num_elements; 1779 1780 events_processed = 0; 1781 max_wait = get_max_wait (d); 1782 do 1783 { 1784 #ifdef mhd_DEBUG_POLLING_FDS 1785 fprintf (stderr, 1786 "### (Starting) epoll_wait(%d, events, %d, %d)...\n", 1787 d->events.data.epoll.e_fd, 1788 (int) d->events.data.epoll.num_elements, 1789 max_wait); 1790 #endif /* mhd_DEBUG_POLLING_FDS */ 1791 num_events = epoll_wait (d->events.data.epoll.e_fd, 1792 d->events.data.epoll.events, 1793 max_events, 1794 max_wait); 1795 #ifdef mhd_DEBUG_POLLING_FDS 1796 fprintf (stderr, 1797 "### (Finished) epoll_wait(%d, events, %d, %d) -> %d\n", 1798 d->events.data.epoll.e_fd, 1799 max_events, 1800 max_wait, 1801 num_events); 1802 #endif /* mhd_DEBUG_POLLING_FDS */ 1803 max_wait = 0; 1804 if (0 > num_events) 1805 { 1806 const int err = errno; 1807 if (EINTR != err) 1808 { 1809 mhd_LOG_MSG (d, MHD_SC_EPOLL_HARD_ERROR, \ 1810 "The epoll_wait() encountered unrecoverable error."); 1811 return false; 1812 } 1813 return true; /* EINTR, try next time */ 1814 } 1815 if (! 
update_statuses_from_eevents (d, (unsigned int) num_events)) 1816 return false; 1817 if (max_events > num_events) 1818 return true; /* All events have been read */ 1819 1820 /* Use all buffer for the next getting events round(s) */ 1821 max_events = (int) d->events.data.epoll.num_elements; 1822 mhd_assert (0 < max_events); 1823 mhd_assert (d->events.data.epoll.num_elements == (size_t) max_events); 1824 max_wait = 0; /* Do not block on the next getting events rounds */ 1825 1826 events_processed += (unsigned int) num_events; /* Avoid reading too many events */ 1827 } while ((events_processed < d->conns.cfg.count_limit) 1828 || (events_processed < d->conns.cfg.count_limit + 2)); 1829 1830 return true; 1831 } 1832 1833 1834 #endif /* MHD_SUPPORT_EPOLL */ 1835 1836 /** 1837 * Close timed-out connections (if any) 1838 * @param d the daemon to use 1839 */ 1840 static MHD_FN_PAR_NONNULL_ALL_ void 1841 daemon_close_timedout_conns (struct MHD_Daemon *restrict d) 1842 { 1843 struct MHD_Connection *c; 1844 struct MHD_Connection *prev_c; 1845 1846 #if defined(MHD_SUPPORT_THREADS) 1847 mhd_assert (! mhd_D_HAS_WORKERS (d)); 1848 mhd_assert (! mhd_D_HAS_THR_PER_CONN (d)); 1849 #endif /* MHD_SUPPORT_THREADS */ 1850 1851 /* Check "normal" timeouts list */ 1852 c = mhd_DLINKEDL_GET_LAST_D (&(d->conns.def_timeout)); 1853 1854 while (NULL != c) 1855 { 1856 mhd_assert (! 
c->timeout.in_cstm_tmout_list);
    mhd_assert (0u != d->conns.cfg.timeout_milsec);

    if (mhd_conn_is_timeout_expired (c))
    {
      /* Save the previous element before the current one is destroyed */
      prev_c = mhd_DLINKEDL_GET_PREV (&(c->timeout),
                                      tmout_list);
      mhd_conn_start_closing_timedout (c);
      mhd_conn_pre_clean (c);
      mhd_conn_remove_from_daemon (c);
      mhd_conn_close_final (c);

      c = prev_c;
    }
    else
      break; /* DL-list is sorted, no need to check the rest of the list */
  }

  /* Check "custom" timeouts list */
  c = mhd_DLINKEDL_GET_LAST_D (&(d->conns.cust_timeout));

  while (NULL != c)
  {
    mhd_assert (c->timeout.in_cstm_tmout_list);

    prev_c = mhd_DLINKEDL_GET_PREV (&(c->timeout),
                                    tmout_list);

    if (mhd_conn_is_timeout_expired (c))
    {
      mhd_conn_start_closing_timedout (c);
      mhd_conn_pre_clean (c);
      mhd_conn_remove_from_daemon (c);
      mhd_conn_close_final (c);
    }

    /* "Custom" timeouts list is not sorted, check all members */
    c = prev_c;
  }
}


/**
 * Prepare daemon's data for the new round of connections processing
 * @param d the daemon to use
 */
static MHD_FN_PAR_NONNULL_ALL_ void
daemon_reset_per_round_data (struct MHD_Daemon *restrict d)
{
  /* Invalidate the cached time so it gets re-read this round */
  d->events.time.is_set = false;
}


/**
 * Perform one round of daemon connection and data processing.
 *
 * This function do the following:
 * + poll all connections and daemon FDs (if internal polling is used);
 * + resume connections pending to be resumed;
 * + update connection statuses based on socket states (recv/send ready or
 *   disconnect detection);
 * + receive, send and/or parse connections data as needed, including call of
 *   callbacks for processing requests and response generation;
 * + close broken connections;
 * + accept new connection (if needed);
 * + cleanup closed "upgraded" connections.
 * @param d the daemon to use
 * @return 'true' on success,
 *         'false' if daemon is broken
 */
static MHD_FN_PAR_NONNULL_ (1) bool
process_all_events_and_data (struct MHD_Daemon *restrict d)
{
  daemon_reset_per_round_data (d);

  /* Dispatch on the configured polling backend; the select() backend
     resumes connections itself, the other backends resume afterwards. */
  switch (d->events.poll_type)
  {
  case mhd_POLL_TYPE_EXT:
    mhd_assert (mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
    if (! ext_events_process_net_updates_and_resume_conn (d))
      return false;
    break;
#ifdef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
    if (! get_all_net_updates_by_select_and_resume_conn (d, false))
      return false;
    break;
#endif /* MHD_SUPPORT_SELECT */
#ifdef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
    if (! get_all_net_updates_by_poll (d, false))
      return false;
    daemon_resume_conns_if_needed (d);
    break;
#endif /* MHD_SUPPORT_POLL */
#ifdef MHD_SUPPORT_EPOLL
  case mhd_POLL_TYPE_EPOLL:
    if (! get_all_net_updates_by_epoll (d))
      return false;
    daemon_resume_conns_if_needed (d);
    break;
#endif /* MHD_SUPPORT_EPOLL */
#ifndef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
#endif /* ! MHD_SUPPORT_SELECT */
#ifndef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
#endif /* ! MHD_SUPPORT_POLL */
  case mhd_POLL_TYPE_NOT_SET_YET:
  default:
    mhd_UNREACHABLE ();
    MHD_PANIC ("Daemon data integrity broken");
    break;
  }

  mhd_daemon_process_ext_added_conns (d);

  if (d->events.accept_pending && ! d->conns.block_new)
    d->events.accept_pending = ! daemon_accept_new_conns (d);

  daemon_process_all_active_conns (d);
  daemon_close_timedout_conns (d);
  daemon_cleanup_upgraded_conns (d);
  return !
mhd_D_HAS_STOP_REQ (d);
}


/* Internal worker for MHD_daemon_process_reg_events(); validates the
   daemon state, runs one processing round and refreshes external-event
   registrations. */
static
MHD_FN_PAR_NONNULL_ (1) enum MHD_StatusCode
process_reg_events_int (struct MHD_Daemon *MHD_RESTRICT daemon,
                        uint_fast64_t *MHD_RESTRICT next_max_wait)
{
  enum MHD_StatusCode res;

  if (mhd_DAEMON_STATE_STARTED > daemon->state)
    return MHD_SC_TOO_EARLY;
  if (! mhd_WM_INT_HAS_EXT_EVENTS (daemon->wmode_int))
    return MHD_SC_EXTERNAL_EVENT_ONLY;
  if (mhd_DAEMON_STATE_STARTED < daemon->state)
    return MHD_SC_TOO_LATE;

#ifdef MHD_SUPPORT_THREADS
  if (daemon->events.data.extr.itc_data.is_broken)
    return MHD_SC_DAEMON_SYS_DATA_BROKEN;
#endif /* MHD_SUPPORT_THREADS */

  if (daemon->net.listen.is_broken)
    return MHD_SC_DAEMON_SYS_DATA_BROKEN;

  /* Ignore returned value */
  (void) process_all_events_and_data (daemon);

  /* Be safe: report "wait forever" until the real value is known */
  if (NULL != next_max_wait)
    *next_max_wait = MHD_WAIT_INDEFINITELY;

  res = ext_events_update_registrations (daemon);
  if (MHD_SC_OK != res)
    return res;

#ifdef MHD_SUPPORT_THREADS
  if (daemon->events.data.extr.itc_data.is_broken)
  {
    log_itc_broken (daemon);
    return MHD_SC_DAEMON_SYS_DATA_BROKEN;
  }
#endif /* MHD_SUPPORT_THREADS */

  if (daemon->net.listen.is_broken)
  {
    log_listen_broken (daemon);
    return MHD_SC_DAEMON_SYS_DATA_BROKEN;
  }

  if (NULL != next_max_wait)
    *next_max_wait = mhd_daemon_get_wait_max (daemon);

  return MHD_SC_OK;
}


MHD_EXTERN_
MHD_FN_PAR_NONNULL_ (1) enum MHD_StatusCode
MHD_daemon_process_reg_events (struct MHD_Daemon *MHD_RESTRICT daemon,
                               uint_fast64_t *MHD_RESTRICT next_max_wait)
{
  enum MHD_StatusCode res;
#ifdef mhd_DEBUG_POLLING_FDS
  fprintf (stderr,
           "### (Starting) MHD_daemon_process_reg_events(daemon, [%s])...\n",
           (NULL != next_max_wait) ? "non-NULL" : "NULL");
#endif
  res = process_reg_events_int (daemon,
                                next_max_wait);
#ifdef mhd_DEBUG_POLLING_FDS
  if (NULL == next_max_wait)
    fprintf (stderr,
             "### (Finished) MHD_daemon_process_reg_events(daemon, [NULL]) ->"
             "%u\n",
             (unsigned int) res);
  else if (MHD_WAIT_INDEFINITELY == *next_max_wait)
    fprintf (stderr,
             "### (Finished) MHD_daemon_process_reg_events(daemon, "
             "->MHD_WAIT_INDEFINITELY) ->%u\n",
             (unsigned int) res);
  else
    fprintf (stderr,
             "### (Finished) MHD_daemon_process_reg_events(daemon, ->%llu) "
             "->%u\n",
             (unsigned long long) *next_max_wait,
             (unsigned int) res);
#endif
  return res;
}


#ifdef MHD_SUPPORT_THREADS

/**
 * The entry point for the daemon worker thread
 * @param cls the closure
 */
mhd_THRD_RTRN_TYPE mhd_THRD_CALL_SPEC
mhd_worker_all_events (void *cls)
{
  struct MHD_Daemon *const restrict d = (struct MHD_Daemon *) cls;
  mhd_thread_handle_ID_set_current_thread_ID (&(d->threading.tid));
  mhd_assert (d->dbg.net_inited);
  mhd_assert (! d->dbg.net_deinited);
  mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type));
  mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type));
  mhd_assert (mhd_DAEMON_TYPE_LISTEN_ONLY != d->threading.d_type);
  mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
  mhd_assert (mhd_WM_INT_INTERNAL_EVENTS_THREAD_PER_CONNECTION != d->wmode_int);
  mhd_assert (d->dbg.events_fully_inited);
  mhd_assert (d->dbg.connections_inited);

#ifdef mhd_HAVE_MHD_THREAD_BLOCK_SIGPIPE
  // TODO: store and use the result
  (void) mhd_thread_block_sigpipe ();
#endif

  /* Main event loop: run rounds until stop is requested or the daemon
     reports an unrecoverable failure */
  while (! d->threading.stop_requested)
  {
    if (! process_all_events_and_data (d))
      break;
  }
  if (!
d->threading.stop_requested)
  {
    mhd_LOG_MSG (d, MHD_SC_DAEMON_THREAD_STOP_UNEXPECTED, \
                 "The daemon thread is stopping, but termination has not " \
                 "been requested for the daemon.");
  }
  mhd_daemon_close_all_conns (d);

#ifdef MHD_SUPPORT_HTTPS
  if (mhd_D_HAS_TLS (d))
    mhd_tls_thread_cleanup (d->tls);
#endif /* MHD_SUPPORT_HTTPS */

  return (mhd_THRD_RTRN_TYPE) 0;
}


/* Monitor only the ITC and the listen socket (thread-per-connection mode);
   connection sockets are handled by their own threads.
   NOTE(review): currently always returns 'false' (accepting is not yet
   implemented), so the caller's loop exits after one iteration — confirm
   this is the intended interim behavior. */
static MHD_FN_PAR_NONNULL_ (1) bool
process_listening_and_itc_only (struct MHD_Daemon *restrict d)
{
  if (false)
    (void) 0;
#ifdef MHD_SUPPORT_SELECT
  else if (mhd_POLL_TYPE_SELECT == d->events.poll_type)
  {
    return false; // TODO: implement
  }
#endif /* MHD_SUPPORT_SELECT */
#ifdef MHD_SUPPORT_POLL
  else if (mhd_POLL_TYPE_POLL == d->events.poll_type)
  {
    if (! get_all_net_updates_by_poll (d, true))
      return false;
  }
#endif /* MHD_SUPPORT_POLL */
  else
  {
    (void) d; /* Mute compiler warning */
    mhd_assert (0 && "Impossible value");
    mhd_UNREACHABLE ();
    MHD_PANIC ("Daemon data integrity broken");
  }
  // TODO: Accept connections
  return false;
}


/**
 * The entry point for the daemon listening thread
 * @param cls the closure
 */
mhd_THRD_RTRN_TYPE mhd_THRD_CALL_SPEC
mhd_worker_listening_only (void *cls)
{
  struct MHD_Daemon *const restrict d = (struct MHD_Daemon *) cls;
  mhd_thread_handle_ID_set_current_thread_ID (&(d->threading.tid));

  mhd_assert (d->dbg.net_inited);
  mhd_assert (! d->dbg.net_deinited);
  mhd_assert (mhd_DAEMON_TYPE_LISTEN_ONLY == d->threading.d_type);
  mhd_assert (mhd_WM_INT_INTERNAL_EVENTS_THREAD_PER_CONNECTION == d->wmode_int);
  mhd_assert (d->dbg.events_fully_inited);
  mhd_assert (d->dbg.connections_inited);

#ifdef mhd_HAVE_MHD_THREAD_BLOCK_SIGPIPE
  // TODO: store and use the result
  (void) mhd_thread_block_sigpipe ();
#endif

  while (! d->threading.stop_requested)
  {
    if (! process_listening_and_itc_only (d))
      break;
  }
  if (! d->threading.stop_requested)
  {
    mhd_LOG_MSG (d, MHD_SC_DAEMON_THREAD_STOP_UNEXPECTED, \
                 "The daemon thread is stopping, but termination has " \
                 "not been requested by the daemon.");
  }

#ifdef MHD_SUPPORT_HTTPS
  if (mhd_D_HAS_TLS (d))
    mhd_tls_thread_cleanup (d->tls);
#endif /* MHD_SUPPORT_HTTPS */

  return (mhd_THRD_RTRN_TYPE) 0;
}


/* The entry point for a per-connection worker thread (not implemented yet) */
mhd_THRD_RTRN_TYPE mhd_THRD_CALL_SPEC
mhd_worker_connection (void *cls)
{
  if (cls) // TODO: Implement
    MHD_PANIC ("Not yet implemented");

#if 0 // def MHD_SUPPORT_HTTPS
  if (mhd_D_HAS_TLS (d))
    mhd_tls_thread_cleanup (d->tls);
#endif /* MHD_SUPPORT_HTTPS */

  return (mhd_THRD_RTRN_TYPE) 0;
}


#endif /* MHD_SUPPORT_THREADS */