events_process.c (62708B)
1 /* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */ 2 /* 3 This file is part of GNU libmicrohttpd. 4 Copyright (C) 2024 Evgeny Grin (Karlson2k) 5 6 GNU libmicrohttpd is free software; you can redistribute it and/or 7 modify it under the terms of the GNU Lesser General Public 8 License as published by the Free Software Foundation; either 9 version 2.1 of the License, or (at your option) any later version. 10 11 GNU libmicrohttpd is distributed in the hope that it will be useful, 12 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 Lesser General Public License for more details. 15 16 Alternatively, you can redistribute GNU libmicrohttpd and/or 17 modify it under the terms of the GNU General Public License as 18 published by the Free Software Foundation; either version 2 of 19 the License, or (at your option) any later version, together 20 with the eCos exception, as follows: 21 22 As a special exception, if other files instantiate templates or 23 use macros or inline functions from this file, or you compile this 24 file and link it with other works to produce a work based on this 25 file, this file does not by itself cause the resulting work to be 26 covered by the GNU General Public License. However the source code 27 for this file must still be made available in accordance with 28 section (3) of the GNU General Public License v2. 29 30 This exception does not invalidate any other reasons why a work 31 based on this file might be covered by the GNU General Public 32 License. 33 34 You should have received copies of the GNU Lesser General Public 35 License and the GNU General Public License along with this library; 36 if not, see <https://www.gnu.org/licenses/>. 
37 */ 38 39 /** 40 * @file src/mhd2/events_process.c 41 * @brief The implementation of events processing functions 42 * @author Karlson2k (Evgeny Grin) 43 */ 44 45 #include "mhd_sys_options.h" 46 #include "events_process.h" 47 48 #include "mhd_assert.h" 49 #include "mhd_unreachable.h" 50 51 #if defined(mhd_DEBUG_SUSPEND_RESUME) || defined(mhd_DEBUG_POLLING_FDS) 52 # include <stdio.h> 53 #endif /* mhd_DEBUG_SUSPEND_RESUME */ 54 55 #include "mhd_locks.h" 56 57 #include "mhd_socket_type.h" 58 #include "sys_poll.h" 59 #include "sys_select.h" 60 #ifdef MHD_SUPPORT_EPOLL 61 # include <sys/epoll.h> 62 #endif 63 #ifdef MHD_SOCKETS_KIND_POSIX 64 # include "sys_errno.h" 65 #endif 66 67 #include "mhd_itc.h" 68 69 #include "mhd_panic.h" 70 #include "mhd_dbg_print.h" 71 72 #include "mhd_sockets_macros.h" 73 74 #include "mhd_daemon.h" 75 #include "mhd_connection.h" 76 77 #include "conn_mark_ready.h" 78 #include "daemon_logger.h" 79 #include "daemon_add_conn.h" 80 #include "daemon_funcs.h" 81 #include "conn_data_process.h" 82 #include "stream_funcs.h" 83 #include "extr_events_funcs.h" 84 85 #ifdef MHD_SUPPORT_UPGRADE 86 # include "upgrade_proc.h" 87 #endif /* MHD_SUPPORT_UPGRADE */ 88 89 #ifdef MHD_SUPPORT_HTTPS 90 # include "mhd_tls_funcs.h" 91 #endif 92 93 #ifdef MHD_SUPPORT_HTTP2 94 # include "h2/h2_comm.h" 95 #endif 96 97 #include "mhd_public_api.h" 98 99 #ifdef mhd_DEBUG_POLLING_FDS 100 /** 101 * Debug-printf request of FD polling/monitoring 102 * @param fd_name the name of FD ("ITC", "lstn" or "conn") 103 * @param fd the FD value 104 * @param r_ready the request for read (or receive) readiness 105 * @param w_ready the request for write (or send) readiness 106 * @param e_ready the request for exception (or error) readiness 107 */ 108 MHD_INTERNAL MHD_FN_PAR_NONNULL_ALL_ void 109 mhd_dbg_print_fd_mon_req (const char *fd_name, 110 MHD_Socket fd, 111 bool r_ready, 112 bool w_ready, 113 bool e_ready) 114 { 115 char state_str[] = "x:x:x"; 116 state_str[0] = r_ready ? 
'R' : '-';
  state_str[2] = w_ready ? 'W' : '-';
  state_str[4] = e_ready ? 'E' : '-';

  fprintf (stderr,
           "### Set FD watching: %4s [%2llu] for %s\n",
           fd_name,
           (unsigned long long) fd,
           state_str);
}


/**
 * Debug-printf reported (by polling) status of FD
 * @param fd_name the name of FD ("ITC", "lstn" or "conn")
 * @param fd the FD value
 * @param r_ready the read (or receive) readiness
 * @param w_ready the write (or send) readiness
 * @param e_ready the exception (or error) readiness
 */
static MHD_FN_PAR_NONNULL_ALL_ void
dbg_print_fd_state_update (const char *fd_name,
                           MHD_Socket fd,
                           bool r_ready,
                           bool w_ready,
                           bool e_ready)
{
  /* Rendered as "R:W:E" with '-' for each state that is not set */
  char state_str[] = "x:x:x";
  state_str[0] = r_ready ? 'R' : '-';
  state_str[2] = w_ready ? 'W' : '-';
  state_str[4] = e_ready ? 'E' : '-';

  fprintf (stderr,
           "### FD state update: %4s [%2llu] -> %s\n",
           fd_name,
           (unsigned long long) fd,
           state_str);
}


#else  /* ! mhd_DEBUG_POLLING_FDS */
/* No-op stub when FD polling debug output is disabled */
# define dbg_print_fd_state_update(fd_n,fd,r_ready,w_ready,e_ready) \
        ((void) 0)
#endif /* ! mhd_DEBUG_POLLING_FDS */

#ifdef MHD_SUPPORT_THREADS
/**
 * Log error message about broken ITC
 * @param d the daemon to use
 */
static MHD_FN_PAR_NONNULL_ALL_ void
log_itc_broken (struct MHD_Daemon *restrict d)
{
  mhd_LOG_MSG (d, \
               MHD_SC_ITC_STATUS_ERROR, \
               "System reported that ITC has an error status or broken.");
}


#endif /* MHD_SUPPORT_THREADS */

/**
 * Log error message about broken listen socket
 * @param d the daemon to use
 */
static MHD_FN_PAR_NONNULL_ALL_ void
log_listen_broken (struct MHD_Daemon *restrict d)
{
  mhd_LOG_MSG (d, MHD_SC_LISTEN_STATUS_ERROR, \
               "System reported that the listening socket has an error " \
               "status or broken. The daemon will not listen any more.");
}


/**
 * Get the maximum time (in milliseconds) the daemon may block waiting for
 * events.  Returns zero when any work is already pending (a connection to
 * accept, a connection to resume, or connections already marked "ready"),
 * otherwise waits indefinitely.
 * @param d the daemon to use
 * @return 0 if there is pending work, #MHD_WAIT_INDEFINITELY otherwise
 */
MHD_INTERNAL MHD_FN_PAR_NONNULL_ALL_ uint_fast64_t
mhd_daemon_get_wait_max (struct MHD_Daemon *restrict d)
{

  mhd_assert (! mhd_D_HAS_WORKERS (d));

  if (d->events.accept_pending && ! d->conns.block_new)
  {
#ifdef mhd_DEBUG_POLLING_FDS
    fprintf (stderr,
             "### mhd_daemon_get_wait_max(daemon) -> zero "
             "(accept new conn pending)\n");
#endif
    return 0;
  }
  if (d->events.act_req.resume)
  {
#ifdef mhd_DEBUG_POLLING_FDS
    fprintf (stderr,
             "### mhd_daemon_get_wait_max(daemon) -> zero "
             "(resume connection pending)\n");
#endif
    return 0;
  }
  if (NULL != mhd_DLINKEDL_GET_FIRST (&(d->events), proc_ready))
  {
#ifdef mhd_DEBUG_POLLING_FDS
    fprintf (stderr,
             "### mhd_daemon_get_wait_max(daemon) -> zero "
             "(connection(s) is already ready)\n");
#endif
    return 0;
  }

#ifdef mhd_DEBUG_POLLING_FDS
  fprintf (stderr,
           "### mhd_daemon_get_wait_max(daemon) -> MHD_WAIT_INDEFINITELY\n");
#endif
  return MHD_WAIT_INDEFINITELY; // TODO: calculate correct timeout value
}


/**
 * Clear the "suspended" state of the connection and force it to be processed
 * in the current processing round.
 * @param c the connection to resume
 * @param d the daemon of the connection
 */
static MHD_FN_PAR_NONNULL_ALL_ void
start_resuming_connection (struct MHD_Connection *restrict c,
                           struct MHD_Daemon *restrict d)
{
  mhd_assert (c->suspended);
#ifdef mhd_DEBUG_SUSPEND_RESUME
  fprintf (stderr,
           "%%%%%% Resuming connection, FD: %2llu\n",
           (unsigned long long) c->sk.fd);
#endif /* mhd_DEBUG_SUSPEND_RESUME */
  c->suspended = false;
  mhd_stream_resumed_activity_mark (c);
  mhd_conn_mark_ready (c, d); /* Force processing connection in this round */
}


/**
 * Check whether any resuming connections are pending and resume them
 * @param d the daemon to use
 */
static MHD_FN_PAR_NONNULL_ALL_ void
daemon_resume_conns_if_needed (struct MHD_Daemon *restrict d)
{
  struct MHD_Connection *c;

  if (! d->events.act_req.resume)
    return;

  d->events.act_req.resume = false; /* Reset flag before processing data */

  for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn);
       NULL != c;
       c = mhd_DLINKEDL_GET_NEXT (c,all_conn))
  {
    if (c->resuming)
      start_resuming_connection (c, d);
  }
}


mhd_DATA_TRUNCATION_RUNTIME_CHECK_DISABLE

/**
 * Get the maximum wait time as an 'int' (in milliseconds), saturating at
 * INT_MAX when the 64-bit wait value does not fit.
 * @param d the daemon to use
 * @return the wait time in milliseconds, clamped to [0, INT_MAX]
 */
static MHD_FN_PAR_NONNULL_ALL_ int
get_max_wait (struct MHD_Daemon *restrict d)
{
  uint_fast64_t ui64_wait = mhd_daemon_get_wait_max (d);
  int i_wait = (int) ui64_wait;

  /* Saturate when the truncated value is negative or lost precision */
  if ((0 > i_wait) ||
      (ui64_wait != (uint_fast64_t) i_wait))
    return INT_MAX;

  return i_wait;
}


mhd_DATA_TRUNCATION_RUNTIME_CHECK_RESTORE
/* End of warning-less data truncation */


/**
 * Store the polled network readiness states in the connection and update
 * the daemon's "ready to process" marking for the connection.
 * @param d the daemon of the connection
 * @param c the connection to update
 * @param recv_ready whether the socket is ready for receiving
 * @param send_ready whether the socket is ready for sending
 * @param err_state whether the socket is in an error/exception state
 */
MHD_FN_PAR_NONNULL_ (1) static void
update_conn_net_status (struct MHD_Daemon *restrict d,
                        struct MHD_Connection *restrict c,
                        bool recv_ready,
                        bool send_ready,
                        bool err_state)
{
  enum mhd_SocketNetState sk_state;

  mhd_assert (d == c->daemon);
  /* "resuming" must be not processed yet */
  mhd_assert (! c->resuming || c->suspended);

  dbg_print_fd_state_update ("conn", \
                             c->sk.fd, \
                             recv_ready, \
                             send_ready, \
                             err_state);

  sk_state = mhd_SOCKET_NET_STATE_NOTHING;
  if (recv_ready)
    sk_state = (enum mhd_SocketNetState)
               (sk_state | (unsigned int) mhd_SOCKET_NET_STATE_RECV_READY);
  if (send_ready)
    sk_state = (enum mhd_SocketNetState)
               (sk_state | (unsigned int) mhd_SOCKET_NET_STATE_SEND_READY);
  if (err_state)
    sk_state = (enum mhd_SocketNetState)
               (sk_state | (unsigned int) mhd_SOCKET_NET_STATE_ERROR_READY);
  c->sk.ready = sk_state;

  if (! c->suspended)
    mhd_conn_mark_ready_update3 (c, err_state, d);
  else
    mhd_assert (!
c->in_proc_ready); 327 } 328 329 330 /** 331 * Accept new connections on the daemon 332 * @param d the daemon to use 333 * @return true if all incoming connections has been accepted, 334 * false if some connection may still wait to be accepted 335 */ 336 MHD_FN_PAR_NONNULL_ (1) static bool 337 daemon_accept_new_conns (struct MHD_Daemon *restrict d) 338 { 339 unsigned int num_to_accept; 340 mhd_assert (MHD_INVALID_SOCKET != d->net.listen.fd); 341 mhd_assert (! d->net.listen.is_broken); 342 mhd_assert (! d->conns.block_new); 343 mhd_assert (d->conns.count < d->conns.cfg.count_limit); 344 mhd_assert (! mhd_D_HAS_WORKERS (d)); 345 346 if (! d->net.listen.non_block) 347 num_to_accept = 1; /* listen socket is blocking, only one connection can be processed */ 348 else 349 { 350 const unsigned int slots_left = d->conns.cfg.count_limit - d->conns.count; 351 if (! mhd_D_HAS_MASTER (d)) 352 { 353 /* Fill up to one quarter of allowed limit in one turn */ 354 num_to_accept = d->conns.cfg.count_limit / 4; 355 /* Limit to a reasonable number */ 356 if (((sizeof(void *) > 4) ? 4096 : 1024) < num_to_accept) 357 num_to_accept = ((sizeof(void *) > 4) ? 4096 : 1024); 358 if (slots_left < num_to_accept) 359 num_to_accept = slots_left; 360 } 361 #ifdef MHD_SUPPORT_THREADS 362 else 363 { 364 /* Has workers thread pool. Care must be taken to evenly distribute 365 new connections in the workers pool. 366 At the same time, the burst of new connections should be handled as 367 quick as possible. 
*/ 368 const unsigned int num_conn = d->conns.count; 369 const unsigned int limit = d->conns.cfg.count_limit; 370 const unsigned int num_workers = 371 d->threading.hier.master->threading.hier.pool.num; 372 if (num_conn < limit / 16) 373 { 374 num_to_accept = num_conn / num_workers; 375 if (8 > num_to_accept) 376 { 377 if (8 > slots_left / 16) 378 num_to_accept = slots_left / 16; 379 else 380 num_to_accept = 8; 381 } 382 if (64 < num_to_accept) 383 num_to_accept = 64; 384 } 385 else if (num_conn < limit / 8) 386 { 387 num_to_accept = num_conn * 2 / num_workers; 388 if (8 > num_to_accept) 389 { 390 if (8 > slots_left / 8) 391 num_to_accept = slots_left / 8; 392 else 393 num_to_accept = 8; 394 } 395 if (128 < num_to_accept) 396 num_to_accept = 128; 397 } 398 else if (num_conn < limit / 4) 399 { 400 num_to_accept = num_conn * 4 / num_workers; 401 if (8 > num_to_accept) 402 num_to_accept = 8; 403 if (slots_left / 4 < num_to_accept) 404 num_to_accept = slots_left / 4; 405 if (256 < num_to_accept) 406 num_to_accept = 256; 407 } 408 else if (num_conn < limit / 2) 409 { 410 num_to_accept = num_conn * 8 / num_workers; 411 if (16 > num_to_accept) 412 num_to_accept = 16; 413 if (slots_left / 4 < num_to_accept) 414 num_to_accept = slots_left / 4; 415 if (256 < num_to_accept) 416 num_to_accept = 256; 417 } 418 else if (slots_left > limit / 4) 419 { 420 num_to_accept = slots_left * 4 / num_workers; 421 if (slots_left / 8 < num_to_accept) 422 num_to_accept = slots_left / 8; 423 if (128 < num_to_accept) 424 num_to_accept = 128; 425 } 426 else if (slots_left > limit / 8) 427 { 428 num_to_accept = slots_left * 2 / num_workers; 429 if (slots_left / 16 < num_to_accept) 430 num_to_accept = slots_left / 16; 431 if (64 < num_to_accept) 432 num_to_accept = 64; 433 } 434 else /* (slots_left <= limit / 8) */ 435 num_to_accept = slots_left / 16; 436 437 if (0 == num_to_accept) 438 num_to_accept = 1; 439 else if (slots_left > num_to_accept) 440 num_to_accept = slots_left; 441 } 442 #endif /* 
MHD_SUPPORT_THREADS */ 443 } 444 445 while (0 != --num_to_accept) 446 { 447 enum mhd_DaemonAcceptResult res; 448 res = mhd_daemon_accept_connection (d); 449 if (mhd_DAEMON_ACCEPT_NO_MORE_PENDING == res) 450 return true; 451 if (mhd_DAEMON_ACCEPT_FAILED == res) 452 return false; /* This is probably "no system resources" error. 453 To do try to accept more connections now. */ 454 } 455 return false; /* More connections may need to be accepted */ 456 } 457 458 459 /** 460 * Check whether particular connection should be excluded from standard HTTP 461 * communication. 462 * @param c the connection the check 463 * @return 'true' if connection should not be used for HTTP communication 464 * 'false' if connection should be processed as HTTP 465 */ 466 mhd_static_inline MHD_FN_PAR_NONNULL_ALL_ bool 467 is_conn_excluded_from_http_comm (struct MHD_Connection *restrict c) 468 { 469 #ifdef MHD_SUPPORT_UPGRADE 470 if (NULL != c->upgr.c) 471 { 472 mhd_assert ((mhd_HTTP_STAGE_UPGRADED == c->stage) || \ 473 (mhd_HTTP_STAGE_UPGRADED_CLEANING == c->stage)); 474 return true; 475 } 476 #endif /* MHD_SUPPORT_UPGRADE */ 477 478 return c->suspended; 479 } 480 481 482 static bool 483 daemon_process_all_active_conns (struct MHD_Daemon *restrict d) 484 { 485 struct MHD_Connection *c; 486 mhd_assert (! mhd_D_HAS_WORKERS (d)); 487 488 c = mhd_DLINKEDL_GET_FIRST (&(d->events),proc_ready); 489 while (NULL != c) 490 { 491 struct MHD_Connection *next; 492 /* The current connection can be closed or removed from 493 "ready" list */ 494 next = mhd_DLINKEDL_GET_NEXT (c, proc_ready); 495 if (! mhd_conn_process_recv_send_data (c)) 496 { 497 mhd_conn_pre_clean (c); 498 mhd_conn_remove_from_daemon (c); 499 mhd_conn_close_final (c); 500 } 501 else 502 { 503 mhd_assert (! 
c->resuming || c->suspended);
    }

    c = next;
  }
  return true;
}


#ifdef MHD_SUPPORT_UPGRADE
/**
 * Clean-up all HTTP-Upgraded connections scheduled for clean-up
 * @param d the daemon to process
 */
static MHD_FN_PAR_NONNULL_ALL_ void
daemon_cleanup_upgraded_conns (struct MHD_Daemon *d)
{
  /* The clean-up list head is modified by other threads; read it through
     a volatile-qualified pointer for the quick unlocked emptiness check. */
  volatile struct MHD_Daemon *voltl_d = d;
  mhd_assert (! mhd_D_HAS_WORKERS (d));

  if (NULL == mhd_DLINKEDL_GET_FIRST (&(voltl_d->conns.upgr), upgr_cleanup))
    return;

  while (true)
  {
    struct MHD_Connection *c;

    /* Pop one connection from the clean-up list under the lock */
    mhd_mutex_lock_chk (&(d->conns.upgr.ucu_lock));
    c = mhd_DLINKEDL_GET_FIRST (&(d->conns.upgr), upgr_cleanup);
    if (NULL != c)
      mhd_DLINKEDL_DEL (&(d->conns.upgr), c, upgr_cleanup);
    mhd_mutex_unlock_chk (&(d->conns.upgr.ucu_lock));

    if (NULL == c)
      break;

    mhd_assert (mhd_HTTP_STAGE_UPGRADED_CLEANING == c->stage);
    mhd_upgraded_deinit (c);
    mhd_conn_pre_clean (c);
    mhd_conn_remove_from_daemon (c);
    mhd_conn_close_final (c);
  }
}


#else  /* ! MHD_SUPPORT_UPGRADE */
/* No HTTP-Upgrade support: nothing to clean up */
#define daemon_cleanup_upgraded_conns(d) ((void) d)
#endif /* ! MHD_SUPPORT_UPGRADE */

/**
 * Close and clean up all connections of the daemon (daemon shutdown path).
 * Logs a warning if any HTTP-Upgraded connection was left unclosed by
 * the application.
 * @param d the daemon to use
 */
MHD_INTERNAL MHD_FN_PAR_NONNULL_ALL_ void
mhd_daemon_close_all_conns (struct MHD_Daemon *d)
{
  struct MHD_Connection *c;
  bool has_upgraded_unclosed;

  has_upgraded_unclosed = false;
  if (! mhd_D_HAS_THR_PER_CONN (d))
  {
    /* Iterate from the tail; each iteration removes the current tail */
    for (c = mhd_DLINKEDL_GET_LAST (&(d->conns),all_conn);
         NULL != c;
         c = mhd_DLINKEDL_GET_LAST (&(d->conns),all_conn))
    {
#ifdef MHD_SUPPORT_UPGRADE
      mhd_assert (mhd_HTTP_STAGE_UPGRADING != c->stage);
      mhd_assert (mhd_HTTP_STAGE_UPGRADED_CLEANING != c->stage);
      if (NULL != c->upgr.c)
      {
        mhd_assert (c == c->upgr.c);
        has_upgraded_unclosed = true;
        mhd_upgraded_deinit (c);
      }
      else /* Combined with the next 'if' */
#endif
      if (1)
      {
#ifdef MHD_SUPPORT_HTTP2
        if (mhd_C_IS_HTTP2 (c))
          mhd_h2_conn_h2_deinit_start_closing (c);
        else
#endif /* MHD_SUPPORT_HTTP2 */
        mhd_conn_start_closing_d_shutdown (c);
      }
      mhd_conn_pre_clean (c);
      mhd_conn_remove_from_daemon (c);
      mhd_conn_close_final (c);
    }
  }
  else
    mhd_assert (0 && "Not implemented yet");

  if (has_upgraded_unclosed)
    mhd_LOG_MSG (d, MHD_SC_DAEMON_DESTROYED_WITH_UNCLOSED_UPGRADED, \
                 "The daemon is being destroyed, but at least one " \
                 "HTTP-Upgraded connection is unclosed. Any use (including " \
                 "closing) of such connections is undefined behaviour.");
}


/**
 * Process all external events updated of existing connections, information
 * about new connections pending to be accept()'ed, presence of the events on
 * the daemon's ITC; resume connections.
 * @return 'true' if processed successfully,
 *         'false' is unrecoverable error occurs and the daemon must be
 *         closed
 */
static MHD_FN_PAR_NONNULL_ (1) bool
ext_events_process_net_updates_and_resume_conn (struct MHD_Daemon *restrict d)
{
  struct MHD_Connection *restrict c;

  mhd_assert (mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
  mhd_assert (mhd_POLL_TYPE_EXT == d->events.poll_type);

  d->events.act_req.resume = false; /* Reset flag before processing data */

#ifdef MHD_SUPPORT_THREADS
  if (d->events.data.extr.itc_data.is_active)
  {
    d->events.data.extr.itc_data.is_active = false;
    /* Clear ITC here, before other data processing.
     * Any external events will activate ITC again if additional data to
     * process is added externally. Clearing ITC early ensures that new data
     * (with additional ITC activation) will not be missed. */
    mhd_itc_clear (d->threading.itc);
  }
#endif /* MHD_SUPPORT_THREADS */

  for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn);
       NULL != c;
       c = mhd_DLINKEDL_GET_NEXT (c,all_conn))
  {
    bool has_err_state;

    if (c->resuming)
      start_resuming_connection (c, d);
    else
    {
      if (is_conn_excluded_from_http_comm (c))
      {
        mhd_assert (! c->in_proc_ready);
        continue;
      }

      /* The socket readiness was set externally; only derive the error bit */
      has_err_state = (0 != (((unsigned int) c->sk.ready)
                             & mhd_SOCKET_NET_STATE_ERROR_READY));

      mhd_conn_mark_ready_update3 (c,
                                   has_err_state,
                                   d);
    }
  }

  return true;
}


/**
 * Update all registrations of FDs for external monitoring.
* @return #MHD_SC_OK on success,
 *         error code otherwise
 */
static MHD_FN_PAR_NONNULL_ (1) enum MHD_StatusCode
ext_events_update_registrations (struct MHD_Daemon *restrict d)
{
  const bool rereg_all = d->events.data.extr.reg_all;
  const bool edge_trigg = (mhd_WM_INT_EXTERNAL_EVENTS_EDGE == d->wmode_int);
  bool daemon_fds_succeed;
  struct MHD_Connection *c;
  struct MHD_Connection *c_next;

  mhd_assert (mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int));
  mhd_assert (mhd_POLL_TYPE_EXT == d->events.poll_type);

  /* (Re-)register daemon's FDs */

#ifdef MHD_SUPPORT_THREADS
  if (rereg_all ||
      (NULL == d->events.data.extr.itc_data.app_cntx))
  {
    /* (Re-)register ITC FD */
    d->events.data.extr.itc_data.app_cntx =
      mhd_daemon_extr_event_reg (d,
                                 mhd_itc_r_fd (d->threading.itc),
                                 MHD_FD_STATE_RECV_EXCEPT,
                                 d->events.data.extr.itc_data.app_cntx,
                                 (struct MHD_EventUpdateContext *)
                                 mhd_SOCKET_REL_MARKER_ITC);
  }
  daemon_fds_succeed = (NULL != d->events.data.extr.itc_data.app_cntx);
#else  /* ! MHD_SUPPORT_THREADS */
  daemon_fds_succeed = true;
#endif /* ! MHD_SUPPORT_THREADS */

  if (daemon_fds_succeed)
  {
    if ((MHD_INVALID_SOCKET == d->net.listen.fd) &&
        (NULL != d->events.data.extr.listen_data.app_cntx))
    {
      /* De-register the listen FD */
      d->events.data.extr.listen_data.app_cntx =
        mhd_daemon_extr_event_reg (d,
                                   d->net.listen.fd,
                                   MHD_FD_STATE_NONE,
                                   d->events.data.extr.listen_data.app_cntx,
                                   (struct MHD_EventUpdateContext *)
                                   mhd_SOCKET_REL_MARKER_LISTEN);
      /* A non-NULL result here means the de-registration failed */
      if (NULL != d->events.data.extr.listen_data.app_cntx)
        mhd_log_extr_event_dereg_failed (d);
    }
    else if ((MHD_INVALID_SOCKET != d->net.listen.fd) &&
             (rereg_all || (NULL == d->events.data.extr.listen_data.app_cntx)))
    {
      /* (Re-)register listen FD */
      d->events.data.extr.listen_data.app_cntx =
        mhd_daemon_extr_event_reg (d,
                                   d->net.listen.fd,
                                   MHD_FD_STATE_RECV_EXCEPT,
                                   d->events.data.extr.listen_data.app_cntx,
                                   (struct MHD_EventUpdateContext *)
                                   mhd_SOCKET_REL_MARKER_LISTEN);

      daemon_fds_succeed = (NULL != d->events.data.extr.listen_data.app_cntx);
    }
  }

  if (! daemon_fds_succeed)
  {
    mhd_LOG_MSG (d, MHD_SC_EXT_EVENT_REG_DAEMON_FDS_FAILURE, \
                 "Failed to register daemon FDs in the application "
                 "(external events) monitoring.");
    return MHD_SC_EXT_EVENT_REG_DAEMON_FDS_FAILURE;
  }

  for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn);
       NULL != c;
       c = c_next)
  {
    enum MHD_FdState watch_for;

    /* Get the next connection now, as the current connection could be removed
       from the daemon. */
    c_next = mhd_DLINKEDL_GET_NEXT (c,all_conn);

    mhd_assert (! c->resuming || c->suspended);

    if (is_conn_excluded_from_http_comm (c))
    {
      if (NULL != c->extr_event.app_cntx)
      {
        /* De-register the connection socket FD */
        c->extr_event.app_cntx =
          mhd_daemon_extr_event_reg (d,
                                     c->sk.fd,
                                     MHD_FD_STATE_NONE,
                                     c->extr_event.app_cntx,
                                     (struct MHD_EventUpdateContext *) c);
        if (NULL != c->extr_event.app_cntx)
          mhd_log_extr_event_dereg_failed (d);
      }
      continue;
    }

    /* Edge-triggered mode always watches all events; level-triggered mode
       watches only what the connection currently needs, plus exceptions */
    watch_for =
      edge_trigg ?
      MHD_FD_STATE_RECV_SEND_EXCEPT :
      (enum MHD_FdState) (MHD_FD_STATE_EXCEPT
                          | (((unsigned int) c->event_loop_info)
                             & (MHD_EVENT_LOOP_INFO_RECV
                                | MHD_EVENT_LOOP_INFO_SEND)));

    mhd_assert ((! edge_trigg) || \
                (MHD_FD_STATE_RECV_SEND_EXCEPT == c->extr_event.reg_for) || \
                (NULL == c->extr_event.app_cntx));

    if ((NULL == c->extr_event.app_cntx) ||
        rereg_all ||
        (! edge_trigg && (watch_for != c->extr_event.reg_for)))
    {
      /* (Re-)register the connection socket FD */
      c->extr_event.app_cntx =
        mhd_daemon_extr_event_reg (d,
                                   c->sk.fd,
                                   watch_for,
                                   c->extr_event.app_cntx,
                                   (struct MHD_EventUpdateContext *) c);
      if (NULL == c->extr_event.app_cntx)
      {
        /* Registration failed: the connection cannot be monitored, close it */
        mhd_conn_start_closing_ext_event_failed (c);
        mhd_conn_pre_clean (c);
        mhd_conn_remove_from_daemon (c);
        mhd_conn_close_final (c);
      }
      c->extr_event.reg_for = watch_for;
    }
  }

  return MHD_SC_OK;
}


#ifdef MHD_SUPPORT_SELECT

/**
 * Add socket to the fd_set
 * @param fd the socket to add
 * @param fs the pointer to fd_set
 * @param max the pointer to variable to be updated with maximum FD value (or
 *            set to non-zero in case of WinSock)
 * @param d the daemon object
 */
mhd_static_inline MHD_FN_PAR_NONNULL_ALL_
MHD_FN_PAR_INOUT_ (2)
MHD_FN_PAR_INOUT_ (3) void
fd_set_wrap (MHD_Socket fd,
             fd_set *restrict fs,
             int *restrict max,
             struct MHD_Daemon *restrict d)
{
mhd_assert (mhd_FD_FITS_DAEMON (d, fd)); /* Must be checked for every FD before
                                              it is added */
  mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type);
  (void) d; /* Unused with non-debug builds */
#if defined(MHD_SOCKETS_KIND_POSIX)
  FD_SET (fd, fs);
  if (*max < fd)
    *max = fd;
#elif defined(MHD_SOCKETS_KIND_WINSOCK)
  /* Use custom set function to take advantage of know uniqueness of
   * used sockets (to skip useless (for this function) check for duplicated
   * sockets implemented in system's macro). */
  mhd_assert (fs->fd_count < FD_SETSIZE - 1); /* Daemon limits set to always fit FD_SETSIZE */
  mhd_assert (! FD_ISSET (fd, fs)); /* All sockets must be unique */
  fs->fd_array[fs->fd_count++] = fd;
  *max = 1;
#else
#error Unknown sockets type
#endif
}


/**
 * Set daemon's FD_SETs to monitor all daemon's sockets
 * @param d the daemon to use
 * @param listen_only set to 'true' if connections's sockets should NOT
 *                    be monitored
 * @return with POSIX sockets: the maximum number of the socket used in
 *         the FD_SETs;
 *         with winsock: non-zero if at least one socket has been added to
 *         the FD_SETs,
 *         zero if no sockets in the FD_SETs
 */
static MHD_FN_PAR_NONNULL_ (1) int
select_update_fdsets (struct MHD_Daemon *restrict d,
                      bool listen_only)
{
  struct MHD_Connection *c;
  fd_set *const restrict rfds = d->events.data.select.rfds;
  fd_set *const restrict wfds = d->events.data.select.wfds;
  fd_set *const restrict efds = d->events.data.select.efds;
  int ret;

  mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type);
  mhd_assert (NULL != rfds);
  mhd_assert (NULL != wfds);
  mhd_assert (NULL != efds);
  FD_ZERO (rfds);
  FD_ZERO (wfds);
  FD_ZERO (efds);

  ret = 0;
#ifdef MHD_SUPPORT_THREADS
  /* The ITC is watched for reading and exceptions */
  mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
  fd_set_wrap (mhd_itc_r_fd (d->threading.itc),
               rfds,
               &ret,
               d);
  fd_set_wrap (mhd_itc_r_fd (d->threading.itc),
               efds,
               &ret,
               d);
  mhd_dbg_print_fd_mon_req ("ITC", \
                            mhd_itc_r_fd (d->threading.itc), \
                            true, \
                            false, \
                            true);
#endif
  if ((MHD_INVALID_SOCKET != d->net.listen.fd)
      && ! d->conns.block_new)
  {
    mhd_assert (! d->net.listen.is_broken);

    fd_set_wrap (d->net.listen.fd,
                 rfds,
                 &ret,
                 d);
    fd_set_wrap (d->net.listen.fd,
                 efds,
                 &ret,
                 d);
    mhd_dbg_print_fd_mon_req ("lstn", \
                              d->net.listen.fd, \
                              true, \
                              false, \
                              true);
  }
  if (listen_only)
    return ret;

  for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn); NULL != c;
       c = mhd_DLINKEDL_GET_NEXT (c,all_conn))
  {
    mhd_assert (mhd_HTTP_STAGE_CLOSED != c->stage);
    if (is_conn_excluded_from_http_comm (c))
      continue;

    /* Watch only the directions the connection currently needs;
       exceptions are always watched */
    if (0 != (c->event_loop_info & MHD_EVENT_LOOP_INFO_RECV))
      fd_set_wrap (c->sk.fd,
                   rfds,
                   &ret,
                   d);
    if (0 != (c->event_loop_info & MHD_EVENT_LOOP_INFO_SEND))
      fd_set_wrap (c->sk.fd,
                   wfds,
                   &ret,
                   d);
    fd_set_wrap (c->sk.fd,
                 efds,
                 &ret,
                 d);
    mhd_dbg_print_fd_mon_req ("conn", \
                              c->sk.fd, \
                              FD_ISSET (c->sk.fd, rfds), \
                              FD_ISSET (c->sk.fd, wfds), \
                              true);
  }

  return ret;
}


/**
 * Update daemon's and connections' states from the select() results;
 * resume connections scheduled for resuming.
 * @param d the daemon to use
 * @param num_events the number of events reported by select()
 * @return 'true' if processed successfully,
 *         'false' if unrecoverable error occurs and the daemon must be
 *         closed
 */
static MHD_FN_PAR_NONNULL_ (1) bool
select_update_statuses_from_fdsets_and_resume_conn (struct MHD_Daemon *d,
                                                    int num_events)
{
  struct MHD_Connection *c;
  fd_set *const restrict rfds = d->events.data.select.rfds;
  fd_set *const restrict wfds = d->events.data.select.wfds;
  fd_set *const restrict efds = d->events.data.select.efds;
  bool resuming_conn;

  mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type);
  mhd_assert (0 <= num_events);
  mhd_assert (((unsigned int) num_events) <= d->dbg.num_events_elements);

  resuming_conn = d->events.act_req.resume;
  if (resuming_conn)
  {
    mhd_assert (! mhd_D_TYPE_IS_LISTEN_ONLY (d->threading.d_type));
    mhd_assert (! mhd_D_HAS_THR_PER_CONN (d));
    num_events = (int) -1; /* Force process all connections */
    d->events.act_req.resume = false;
  }

#ifndef MHD_FAVOR_SMALL_CODE
  if (0 == num_events)
    return true;
#endif /* ! MHD_FAVOR_SMALL_CODE */

#ifdef MHD_SUPPORT_THREADS
  mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
  dbg_print_fd_state_update ("ITC", \
                             mhd_itc_r_fd (d->threading.itc), \
                             FD_ISSET (mhd_itc_r_fd (d->threading.itc), rfds), \
                             FD_ISSET (mhd_itc_r_fd (d->threading.itc), wfds), \
                             FD_ISSET (mhd_itc_r_fd (d->threading.itc), efds));
  if (FD_ISSET (mhd_itc_r_fd (d->threading.itc), efds))
  {
    log_itc_broken (d);
    /* ITC is broken, need to stop the daemon thread now as otherwise
       application will not be able to stop the thread. */
    return false;
  }
  if (FD_ISSET (mhd_itc_r_fd (d->threading.itc), rfds))
  {
    --num_events;
    /* Clear ITC here, before other data processing.
     * Any external events will activate ITC again if additional data to
     * process is added externally. Clearing ITC early ensures that new data
     * (with additional ITC activation) will not be missed. */
    mhd_itc_clear (d->threading.itc);
  }

#ifndef MHD_FAVOR_SMALL_CODE
  if (0 == num_events)
    return true;
#endif /* ! MHD_FAVOR_SMALL_CODE */
#endif /* MHD_SUPPORT_THREADS */

  if (MHD_INVALID_SOCKET != d->net.listen.fd)
  {
    mhd_assert (! d->net.listen.is_broken);
    dbg_print_fd_state_update ("lstn", \
                               d->net.listen.fd, \
                               FD_ISSET (d->net.listen.fd, rfds), \
                               FD_ISSET (d->net.listen.fd, wfds), \
                               FD_ISSET (d->net.listen.fd, efds));
    if (FD_ISSET (d->net.listen.fd, efds))
    {
      --num_events;
      log_listen_broken (d);
      /* Close the listening socket unless the master daemon should close it */
      if (! mhd_D_HAS_MASTER (d))
        mhd_socket_close (d->net.listen.fd);

      d->events.accept_pending = false;
      d->net.listen.is_broken = true;
      /* Stop monitoring socket to avoid spinning with busy-waiting */
      d->net.listen.fd = MHD_INVALID_SOCKET;
    }
    else
    {
      d->events.accept_pending = FD_ISSET (d->net.listen.fd, rfds);
      if (d->events.accept_pending)
        --num_events;
    }
  }

  mhd_assert ((0 == num_events) || \
              (! mhd_D_TYPE_IS_LISTEN_ONLY (d->threading.d_type)));

#ifdef MHD_FAVOR_SMALL_CODE
  (void) num_events;
  num_events = 1; /* Use static value to minimise the binary size of the next loop */
#endif /* MHD_FAVOR_SMALL_CODE */

  for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns), all_conn);
       (NULL != c) && (0 != num_events);
       c = mhd_DLINKEDL_GET_NEXT (c, all_conn))
  {
    if (c->resuming)
      start_resuming_connection (c, d);
    else
    {
      MHD_Socket sk;
      bool recv_ready;
      bool send_ready;
      bool err_state;

      if (is_conn_excluded_from_http_comm (c))
        continue;

      sk = c->sk.fd;
      recv_ready = FD_ISSET (sk, rfds);
      send_ready = FD_ISSET (sk, wfds);
      err_state = FD_ISSET (sk, efds);

      update_conn_net_status (d,
                              c,
                              recv_ready,
                              send_ready,
                              err_state);
#ifndef MHD_FAVOR_SMALL_CODE
      if (recv_ready || send_ready || err_state)
        --num_events;
#endif /* MHD_FAVOR_SMALL_CODE */
    }
  }

#ifndef MHD_FAVOR_SMALL_CODE
  // TODO: recheck functionality with HTTP/2
  // mhd_assert ((0 == num_events) || resuming_conn);
#endif /* MHD_FAVOR_SMALL_CODE */
  return true;
}


/**
 * Update states of all connections, check for connection pending
 * to be accept()'ed, check for the events on ITC; resume connections
 * @param listen_only set to 'true' if connections's sockets should NOT
 *                    be monitored
 * @return 'true' if processed
successfully, 1086 * 'false' is unrecoverable error occurs and the daemon must be 1087 * closed 1088 */ 1089 static MHD_FN_PAR_NONNULL_ (1) bool 1090 get_all_net_updates_by_select_and_resume_conn (struct MHD_Daemon *restrict d, 1091 bool listen_only) 1092 { 1093 int max_socket; 1094 int max_wait; 1095 struct timeval tmvl; 1096 int num_events; 1097 mhd_assert (mhd_POLL_TYPE_SELECT == d->events.poll_type); 1098 1099 max_socket = select_update_fdsets (d, 1100 listen_only); 1101 1102 max_wait = get_max_wait (d); // TODO: use correct timeout value 1103 1104 #ifdef MHD_SOCKETS_KIND_WINSOCK 1105 if (0 == max_socket) 1106 { 1107 Sleep ((unsigned int) max_wait); 1108 return true; 1109 } 1110 #endif /* MHD_SOCKETS_KIND_WINSOCK */ 1111 1112 tmvl.tv_sec = max_wait / 1000; 1113 #ifndef MHD_SOCKETS_KIND_WINSOCK 1114 tmvl.tv_usec = (uint_least16_t) ((max_wait % 1000) * 1000); 1115 #else 1116 tmvl.tv_usec = (int) ((max_wait % 1000) * 1000); 1117 #endif 1118 1119 #ifdef mhd_DEBUG_POLLING_FDS 1120 fprintf (stderr, 1121 "### (Starting) select(%d, rfds, wfds, efds, [%llu, %llu])...\n", 1122 max_socket + 1, 1123 (unsigned long long) tmvl.tv_sec, 1124 (unsigned long long) tmvl.tv_usec); 1125 #endif /* mhd_DEBUG_POLLING_FDS */ 1126 num_events = select (max_socket + 1, 1127 d->events.data.select.rfds, 1128 d->events.data.select.wfds, 1129 d->events.data.select.efds, 1130 &tmvl); 1131 #ifdef mhd_DEBUG_POLLING_FDS 1132 fprintf (stderr, 1133 "### (Finished) select(%d, rfds, wfds, efds, ->[%llu, %llu]) -> " 1134 "%d\n", 1135 max_socket + 1, 1136 (unsigned long long) tmvl.tv_sec, 1137 (unsigned long long) tmvl.tv_usec, 1138 num_events); 1139 #endif /* mhd_DEBUG_POLLING_FDS */ 1140 1141 if (0 > num_events) 1142 { 1143 int err; 1144 bool is_hard_error; 1145 bool is_ignored_error; 1146 is_hard_error = false; 1147 is_ignored_error = false; 1148 #if defined(MHD_SOCKETS_KIND_POSIX) 1149 err = errno; 1150 if (0 != err) 1151 { 1152 is_hard_error = 1153 ((mhd_EBADF_OR_ZERO == err) || 
(mhd_EINVAL_OR_ZERO == err)); 1154 is_ignored_error = (mhd_EINTR_OR_ZERO == err); 1155 } 1156 #elif defined(MHD_SOCKETS_KIND_WINSOCK) 1157 err = WSAGetLastError (); 1158 is_hard_error = 1159 ((WSAENETDOWN == err) || (WSAEFAULT == err) || (WSAEINVAL == err) || 1160 (WSANOTINITIALISED == err)); 1161 #endif 1162 if (! is_ignored_error) 1163 { 1164 if (is_hard_error) 1165 { 1166 mhd_LOG_MSG (d, MHD_SC_SELECT_HARD_ERROR, \ 1167 "The select() encountered unrecoverable error."); 1168 return false; 1169 } 1170 mhd_LOG_MSG (d, MHD_SC_SELECT_SOFT_ERROR, \ 1171 "The select() encountered error."); 1172 return true; 1173 } 1174 } 1175 1176 return select_update_statuses_from_fdsets_and_resume_conn (d, num_events); 1177 } 1178 1179 1180 #endif /* MHD_SUPPORT_SELECT */ 1181 1182 1183 #ifdef MHD_SUPPORT_POLL 1184 1185 static MHD_FN_PAR_NONNULL_ (1) unsigned int 1186 poll_update_fds (struct MHD_Daemon *restrict d, 1187 bool listen_only) 1188 { 1189 unsigned int i_s; 1190 unsigned int i_c; 1191 struct MHD_Connection *restrict c; 1192 #ifndef NDEBUG 1193 unsigned int num_skipped = 0; 1194 #endif /* ! NDEBUG */ 1195 1196 mhd_assert (mhd_POLL_TYPE_POLL == d->events.poll_type); 1197 1198 i_s = 0; 1199 #ifdef MHD_SUPPORT_THREADS 1200 mhd_assert (mhd_ITC_IS_VALID (d->threading.itc)); 1201 mhd_assert (d->events.data.poll.fds[i_s].fd == \ 1202 mhd_itc_r_fd (d->threading.itc)); 1203 mhd_assert (mhd_SOCKET_REL_MARKER_ITC == \ 1204 d->events.data.poll.rel[i_s].fd_id); 1205 #ifndef HAVE_POLL_CLOBBERS_EVENTS 1206 mhd_assert (POLLIN == d->events.data.poll.fds[i_s].events); 1207 #else /* HAVE_POLL_CLOBBERS_EVENTS */ 1208 d->events.data.poll.fds[i_s].events = POLLIN; 1209 #endif /* HAVE_POLL_CLOBBERS_EVENTS */ 1210 mhd_dbg_print_fd_mon_req ("ITC", \ 1211 mhd_itc_r_fd (d->threading.itc), \ 1212 true, \ 1213 false, \ 1214 false); 1215 ++i_s; 1216 #endif 1217 if (MHD_INVALID_SOCKET != d->net.listen.fd) 1218 { 1219 mhd_assert (! 
                d->net.listen.is_broken);
    mhd_assert (d->events.data.poll.fds[i_s].fd == d->net.listen.fd);
    mhd_assert (mhd_SOCKET_REL_MARKER_LISTEN == \
                d->events.data.poll.rel[i_s].fd_id);
#ifndef HAVE_POLL_CLOBBERS_EVENTS
    mhd_assert ((POLLIN == d->events.data.poll.fds[i_s].events) ||
                (0 == d->events.data.poll.fds[i_s].events));
#endif /* ! HAVE_POLL_CLOBBERS_EVENTS */
    /* Do not watch for incoming connections while new connections
       are blocked */
    d->events.data.poll.fds[i_s].events = d->conns.block_new ? 0 : POLLIN;
    mhd_dbg_print_fd_mon_req ("lstn", \
                              d->net.listen.fd, \
                              POLLIN == d->events.data.poll.fds[i_s].events, \
                              false, \
                              false);
    ++i_s;
  }
  if (listen_only)
    return i_s;

  i_c = i_s;
  for (c = mhd_DLINKEDL_GET_FIRST (&(d->conns),all_conn); NULL != c;
       c = mhd_DLINKEDL_GET_NEXT (c,all_conn))
  {
    unsigned short events; /* 'unsigned' for correct bits manipulations */

    if (is_conn_excluded_from_http_comm (c))
    {
#ifndef NDEBUG
      ++num_skipped;
#endif /* ! NDEBUG */
      continue;
    }

    mhd_assert ((i_c - i_s) < d->conns.cfg.count_limit);
    mhd_assert (i_c < d->dbg.num_events_elements);
    mhd_assert (mhd_HTTP_STAGE_CLOSED != c->stage);

    /* Request monitoring only for the directions the connection is
       actually waiting on */
    d->events.data.poll.fds[i_c].fd = c->sk.fd;
    d->events.data.poll.rel[i_c].connection = c;
    events = 0;
    if (0 != (c->event_loop_info & MHD_EVENT_LOOP_INFO_RECV))
      events |= MHD_POLL_IN;
    if (0 != (c->event_loop_info & MHD_EVENT_LOOP_INFO_SEND))
      events |= MHD_POLL_OUT;

    d->events.data.poll.fds[i_c].events = (short) events;
    mhd_dbg_print_fd_mon_req ("conn", \
                              c->sk.fd, \
                              MHD_POLL_IN == (MHD_POLL_IN & events), \
                              MHD_POLL_OUT == (MHD_POLL_OUT & events), \
                              false);
    ++i_c;
  }
  mhd_assert ((d->conns.count - num_skipped) == (i_c - i_s));
  mhd_assert (i_c <= d->dbg.num_events_elements);
  return i_c;
}


/**
 * Map results of poll() back to the daemon state: ITC, listen socket
 * and connection statuses.
 * @param d the daemon to use
 * @param num_events the number of events reported by poll()
 * @return 'true' if processed successfully,
 *         'false' if the daemon thread must be stopped
 */
static MHD_FN_PAR_NONNULL_ (1) bool
poll_update_statuses_from_fds (struct MHD_Daemon *restrict d,
                               int num_events)
{
  unsigned int i_s;
  unsigned int i_c;
  mhd_assert (mhd_POLL_TYPE_POLL == d->events.poll_type);
  mhd_assert (0 <= num_events);
  mhd_assert (((unsigned int) num_events) <= d->dbg.num_events_elements);

  if (0 == num_events)
    return true;

  i_s = 0;
#ifdef MHD_SUPPORT_THREADS
  mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
  mhd_assert (d->events.data.poll.fds[i_s].fd == \
              mhd_itc_r_fd (d->threading.itc));
  mhd_assert (mhd_SOCKET_REL_MARKER_ITC == \
              d->events.data.poll.rel[i_s].fd_id);
#ifndef HAVE_POLL_CLOBBERS_EVENTS
  mhd_assert (POLLIN == d->events.data.poll.fds[i_s].events);
#endif /* !
          HAVE_POLL_CLOBBERS_EVENTS */
  dbg_print_fd_state_update ( \
    "ITC", \
    d->events.data.poll.fds[i_s].fd, \
    0 != (d->events.data.poll.fds[i_s].revents & (MHD_POLL_IN | POLLIN)), \
    0 != (d->events.data.poll.fds[i_s].revents & (MHD_POLL_OUT | POLLOUT)), \
    0 != (d->events.data.poll.fds[i_s].revents & (POLLERR | POLLNVAL)));

  if (0 != (d->events.data.poll.fds[i_s].revents & (POLLERR | POLLNVAL)))
  {
    log_itc_broken (d);
    /* ITC is broken, need to stop the daemon thread now as otherwise
       application will not be able to stop the thread. */
    return false;
  }
  if (0 != (d->events.data.poll.fds[i_s].revents & (MHD_POLL_IN | POLLIN)))
  {
    --num_events;
    /* Clear ITC here, before other data processing.
     * Any external events will activate ITC again if additional data to
     * process is added externally. Clearing ITC early ensures that new data
     * (with additional ITC activation) will not be missed. */
    mhd_itc_clear (d->threading.itc);
  }
  ++i_s;

  if (0 == num_events)
    return true;
#endif /* MHD_SUPPORT_THREADS */

  if (MHD_INVALID_SOCKET != d->net.listen.fd)
  {
    const short revents = d->events.data.poll.fds[i_s].revents;

    mhd_assert (! d->net.listen.is_broken);
    mhd_assert (d->events.data.poll.fds[i_s].fd == d->net.listen.fd);
    mhd_assert (mhd_SOCKET_REL_MARKER_LISTEN == \
                d->events.data.poll.rel[i_s].fd_id);
#ifndef HAVE_POLL_CLOBBERS_EVENTS
    mhd_assert ((POLLIN == d->events.data.poll.fds[i_s].events) ||
                (0 == d->events.data.poll.fds[i_s].events));
#endif /* ! HAVE_POLL_CLOBBERS_EVENTS */
    dbg_print_fd_state_update ("lstn", \
                               d->events.data.poll.fds[i_s].fd, \
                               0 != (revents & (MHD_POLL_IN | POLLIN)), \
                               0 != (revents & (MHD_POLL_OUT | POLLOUT)), \
                               0 != (revents & (POLLERR | POLLNVAL | POLLHUP)));
    if (0 != (revents & (POLLERR | POLLNVAL | POLLHUP)))
    {
      --num_events;
      log_listen_broken (d);
      /* Close the listening socket unless the master daemon should close it */
      if (! mhd_D_HAS_MASTER (d))
        mhd_socket_close (d->net.listen.fd);

      d->events.accept_pending = false;
      d->net.listen.is_broken = true;
      /* Stop monitoring socket to avoid spinning with busy-waiting */
      d->net.listen.fd = MHD_INVALID_SOCKET;
    }
    else
    {
      const bool has_new_conns = (0 != (revents & (MHD_POLL_IN | POLLIN)));
      if (has_new_conns)
      {
        --num_events;
        d->events.accept_pending = true;
      }
      else
      {
        /* Check whether the listen socket was monitored for incoming
           connections */
        if (0 != (d->events.data.poll.fds[i_s].events & POLLIN))
          d->events.accept_pending = false;
      }
    }
    ++i_s;
  }

  mhd_assert ((0 == num_events) || \
              (! mhd_D_TYPE_IS_LISTEN_ONLY (d->threading.d_type)));

  for (i_c = i_s; (i_c < i_s + d->conns.count) && (0 < num_events); ++i_c)
  {
    struct MHD_Connection *restrict c;
    bool recv_ready;
    bool send_ready;
    bool err_state;
    short revents;
    mhd_assert (i_c < d->dbg.num_events_elements);
    mhd_assert (mhd_SOCKET_REL_MARKER_EMPTY != \
                d->events.data.poll.rel[i_c].fd_id);
    mhd_assert (mhd_SOCKET_REL_MARKER_ITC != \
                d->events.data.poll.rel[i_c].fd_id);
    mhd_assert (mhd_SOCKET_REL_MARKER_LISTEN != \
                d->events.data.poll.rel[i_c].fd_id);

    c = d->events.data.poll.rel[i_c].connection;
    mhd_assert (! is_conn_excluded_from_http_comm (c));
    mhd_assert (c->sk.fd == d->events.data.poll.fds[i_c].fd);
    revents = d->events.data.poll.fds[i_c].revents;
    recv_ready = (0 != (revents & (MHD_POLL_IN | POLLIN)));
    send_ready = (0 != (revents & (MHD_POLL_OUT | POLLOUT)));
#ifndef MHD_POLLHUP_ON_REM_SHUT_WR
    err_state = (0 != (revents & (POLLHUP | POLLERR | POLLNVAL)));
#else
    err_state = (0 != (revents & (POLLERR | POLLNVAL)));
    if (0 != (revents & POLLHUP))
    { /* This can be a disconnect OR remote side set SHUT_WR */
      recv_ready = true; /* Check the socket by reading */
      if (0 == (c->event_loop_info & MHD_EVENT_LOOP_INFO_RECV))
        err_state = true; /* The socket will not be checked by reading, the only way to avoid spinning */
    }
#endif
    if (0 != (revents & (MHD_POLLPRI | MHD_POLLRDBAND)))
    { /* Statuses were not requested, but returned */
      if (! recv_ready ||
          (0 == (c->event_loop_info & MHD_EVENT_LOOP_INFO_RECV)))
        err_state = true; /* The socket will not be read, the only way to avoid spinning */
    }
    if (0 != (revents & MHD_POLLWRBAND))
    { /* Status was not requested, but returned */
      if (! send_ready ||
          (0 == (c->event_loop_info & MHD_EVENT_LOOP_INFO_SEND)))
        err_state = true; /* The socket will not be written, the only way to avoid spinning */
    }

    update_conn_net_status (d, c, recv_ready, send_ready, err_state);
  }
  mhd_assert (d->conns.count >= (i_c - i_s));
  mhd_assert (i_c <= d->dbg.num_events_elements);
  return true;
}


/**
 * Update states of all connections, check for connection pending
 * to be accept()'ed, check for the events on ITC, using poll().
 * @param d the daemon to use
 * @param listen_only set to 'true' if connections' sockets should NOT
 *                    be monitored
 * @return 'true' if processed successfully,
 *         'false' if unrecoverable error occurs and the daemon must be
 *         closed
 */
static MHD_FN_PAR_NONNULL_ (1) bool
get_all_net_updates_by_poll (struct MHD_Daemon *restrict d,
                             bool listen_only)
{
#ifdef mhd_DEBUG_POLLING_FDS
# ifdef MHD_SOCKETS_KIND_POSIX
  static const char poll_fn_name[] = "poll";
# else /* MHD_SOCKETS_KIND_WINSOCK */
  static const char poll_fn_name[] = "WSAPoll";
# endif /* MHD_SOCKETS_KIND_WINSOCK */
#endif /* mhd_DEBUG_POLLING_FDS */
  unsigned int num_fds;
  int max_wait;
  int num_events;

  mhd_assert (mhd_POLL_TYPE_POLL == d->events.poll_type);

  num_fds = poll_update_fds (d, listen_only);

  // TODO: handle empty list situation
  max_wait = get_max_wait (d); // TODO: use correct timeout value

#ifdef mhd_DEBUG_POLLING_FDS
  fprintf (stderr,
           "### (Starting) %s(fds, %u, %d)...\n",
           poll_fn_name,
           num_fds,
           max_wait);
#endif /* mhd_DEBUG_POLLING_FDS */
  num_events = mhd_poll (d->events.data.poll.fds,
                         num_fds,
                         max_wait); // TODO: use correct timeout value
#ifdef mhd_DEBUG_POLLING_FDS
  fprintf (stderr,
           "### (Finished) %s(fds, %u, %d) -> %d\n",
           poll_fn_name,
           num_fds,
           max_wait,
           num_events);
#endif /* mhd_DEBUG_POLLING_FDS */
  if (0 > num_events)
  {
    int err;
    bool is_hard_error;
    bool is_ignored_error;
    is_hard_error = false;
    is_ignored_error = false;
#if defined(MHD_SOCKETS_KIND_POSIX)
    err = errno;
    if (0 != err)
    {
      is_hard_error =
        ((mhd_EFAULT_OR_ZERO == err) || (mhd_EINVAL_OR_ZERO == err));
      is_ignored_error = (mhd_EINTR_OR_ZERO == err);
    }
#elif defined(MHD_SOCKETS_KIND_WINSOCK)
    err = WSAGetLastError ();
    is_hard_error =
      ((WSAENETDOWN == err) || (WSAEFAULT == err) || (WSAEINVAL == err));
#endif
    if (! is_ignored_error)
    {
      if (is_hard_error)
      {
        mhd_LOG_MSG (d, MHD_SC_POLL_HARD_ERROR, \
                     "The poll() encountered unrecoverable error.");
        return false;
      }
      mhd_LOG_MSG (d, MHD_SC_POLL_SOFT_ERROR, \
                   "The poll() encountered error.");
    }
    return true;
  }

  return poll_update_statuses_from_fds (d, num_events);
}


#endif /* MHD_SUPPORT_POLL */

#ifdef MHD_SUPPORT_EPOLL

/**
 * Map events provided by epoll to connection states, ITC and
 * listen socket states
 */
static MHD_FN_PAR_NONNULL_ (1) bool
update_statuses_from_eevents (struct MHD_Daemon *restrict d,
                              unsigned int num_events)
{
  unsigned int i;
  struct epoll_event *const restrict events =
    d->events.data.epoll.events;
  for (i = 0; num_events > i; ++i)
  {
    struct epoll_event *const e = events + i;
#ifdef MHD_SUPPORT_THREADS
    if (((uint64_t) mhd_SOCKET_REL_MARKER_ITC) == e->data.u64) /* uint64_t is in the system header */
    {
      mhd_assert (mhd_ITC_IS_VALID (d->threading.itc));
      dbg_print_fd_state_update ( \
        "ITC", \
        mhd_itc_r_fd (d->threading.itc), \
        0 != (e->events & EPOLLIN), \
        0 != (e->events & EPOLLOUT), \
        0 != (e->events & (EPOLLPRI | EPOLLERR | EPOLLHUP)));

      if (0 != (e->events & (EPOLLPRI | EPOLLERR | EPOLLHUP)))
      {
        log_itc_broken (d);
        /* ITC is broken, need to stop the daemon thread now as otherwise
           application will not be able to stop the thread. */
        return false;
      }
      if (0 != (e->events & EPOLLIN))
      {
        /* Clear ITC here, before other data processing.
         * Any external events will activate ITC again if additional data to
         * process is added externally. Clearing ITC early ensures that new data
         * (with additional ITC activation) will not be missed. */
        mhd_itc_clear (d->threading.itc);
      }
    }
    else
#endif /* MHD_SUPPORT_THREADS */
    if (((uint64_t) mhd_SOCKET_REL_MARKER_LISTEN) == e->data.u64) /* uint64_t is in the system header */
    {
      mhd_assert (MHD_INVALID_SOCKET != d->net.listen.fd);
      dbg_print_fd_state_update ( \
        "lstn", \
        d->net.listen.fd, \
        0 != (e->events & EPOLLIN), \
        0 != (e->events & EPOLLOUT), \
        0 != (e->events & (EPOLLPRI | EPOLLERR | EPOLLHUP)));
      if (0 != (e->events & (EPOLLPRI | EPOLLERR | EPOLLHUP)))
      {
        log_listen_broken (d);

        /* Close the listening socket unless the master daemon should close it */
        if (! mhd_D_HAS_MASTER (d))
          mhd_socket_close (d->net.listen.fd);
        else
        {
          /* Ignore possible error as the socket could be already removed
             from the epoll monitoring by closing the socket */
          (void) epoll_ctl (d->events.data.epoll.e_fd,
                            EPOLL_CTL_DEL,
                            d->net.listen.fd,
                            NULL);
        }

        d->events.accept_pending = false;
        d->net.listen.is_broken = true;
        d->net.listen.fd = MHD_INVALID_SOCKET;
      }
      else
        d->events.accept_pending = (0 != (e->events & EPOLLIN));
    }
    else
    {
      bool recv_ready;
      bool send_ready;
      bool err_state;
      struct MHD_Connection *const restrict c =
        (struct MHD_Connection *) e->data.ptr;
      mhd_assert (!
is_conn_excluded_from_http_comm (c)); 1601 recv_ready = (0 != (e->events & (EPOLLIN | EPOLLERR | EPOLLHUP))); 1602 send_ready = (0 != (e->events & (EPOLLOUT | EPOLLERR | EPOLLHUP))); 1603 err_state = (0 != (e->events & (EPOLLERR | EPOLLHUP))); 1604 1605 update_conn_net_status (d, c, recv_ready, send_ready, err_state); 1606 } 1607 } 1608 return true; 1609 } 1610 1611 1612 /** 1613 * Update states of all connections, check for connection pending 1614 * to be accept()'ed, check for the events on ITC. 1615 */ 1616 static MHD_FN_PAR_NONNULL_ (1) bool 1617 get_all_net_updates_by_epoll (struct MHD_Daemon *restrict d) 1618 { 1619 int max_events; 1620 int num_events; 1621 unsigned int events_processed; 1622 int max_wait; 1623 mhd_assert (mhd_POLL_TYPE_EPOLL == d->events.poll_type); 1624 mhd_assert (0 < ((int) d->events.data.epoll.num_elements)); 1625 mhd_assert (d->events.data.epoll.num_elements == \ 1626 (size_t) ((int) d->events.data.epoll.num_elements)); 1627 mhd_assert (0 != d->events.data.epoll.num_elements); 1628 mhd_assert (0 != d->conns.cfg.count_limit); 1629 mhd_assert (d->events.data.epoll.num_elements == d->dbg.num_events_elements); 1630 1631 // TODO: add listen socket enable/disable 1632 1633 /* Minimise amount of data passed from userspace to kernel and back */ 1634 max_events = (int) d->conns.cfg.count_limit; 1635 #ifdef MHD_SUPPORT_THREADS 1636 ++max_events; 1637 #endif /* MHD_SUPPORT_THREADS */ 1638 if (MHD_INVALID_SOCKET != d->net.listen.fd) 1639 ++max_events; 1640 /* Make sure that one extra slot used to clearly detect that all events 1641 * were gotten. 
*/ 1642 ++max_events; 1643 if ((0 > max_events) || 1644 (max_events > (int) d->events.data.epoll.num_elements)) 1645 max_events = (int) d->events.data.epoll.num_elements; 1646 1647 events_processed = 0; 1648 max_wait = get_max_wait (d); // TODO: use correct timeout value 1649 do 1650 { 1651 #ifdef mhd_DEBUG_POLLING_FDS 1652 fprintf (stderr, 1653 "### (Starting) epoll_wait(%d, events, %d, %d)...\n", 1654 d->events.data.epoll.e_fd, 1655 (int) d->events.data.epoll.num_elements, 1656 max_wait); 1657 #endif /* mhd_DEBUG_POLLING_FDS */ 1658 num_events = epoll_wait (d->events.data.epoll.e_fd, 1659 d->events.data.epoll.events, 1660 max_events, 1661 max_wait); 1662 #ifdef mhd_DEBUG_POLLING_FDS 1663 fprintf (stderr, 1664 "### (Finished) epoll_wait(%d, events, %d, %d) -> %d\n", 1665 d->events.data.epoll.e_fd, 1666 max_events, 1667 max_wait, 1668 num_events); 1669 #endif /* mhd_DEBUG_POLLING_FDS */ 1670 max_wait = 0; 1671 if (0 > num_events) 1672 { 1673 const int err = errno; 1674 if (EINTR != err) 1675 { 1676 mhd_LOG_MSG (d, MHD_SC_EPOLL_HARD_ERROR, \ 1677 "The epoll_wait() encountered unrecoverable error."); 1678 return false; 1679 } 1680 return true; /* EINTR, try next time */ 1681 } 1682 if (! 
update_statuses_from_eevents (d, (unsigned int) num_events)) 1683 return false; 1684 if (max_events > num_events) 1685 return true; /* All events have been read */ 1686 1687 /* Use all buffer for the next getting events round(s) */ 1688 max_events = (int) d->events.data.epoll.num_elements; 1689 mhd_assert (0 < max_events); 1690 mhd_assert (d->events.data.epoll.num_elements == (size_t) max_events); 1691 max_wait = 0; /* Do not block on the next getting events rounds */ 1692 1693 events_processed += (unsigned int) num_events; /* Avoid reading too many events */ 1694 } while ((events_processed < d->conns.cfg.count_limit) 1695 || (events_processed < d->conns.cfg.count_limit + 2)); 1696 1697 return true; 1698 } 1699 1700 1701 #endif /* MHD_SUPPORT_EPOLL */ 1702 1703 1704 /** 1705 * Perform one round of daemon connection and data processing. 1706 * 1707 * This function do the following: 1708 * + poll all connections and daemon FDs (if internal polling is used); 1709 * + resume connections pending to be resumed; 1710 * + update connection statuses based on socket states (recv/send ready or 1711 * disconnect detection); 1712 * + receive, send and/or parse connections data as needed, including call of 1713 * callbacks for processing requests and response generation; 1714 * + close broken connections; 1715 * + accept new connection (if needed); 1716 * + cleanup closed "upgraded" connections. 1717 * @param d the daemon to use 1718 * @return 'true' on success, 1719 * 'false' if daemon is broken 1720 */ 1721 static MHD_FN_PAR_NONNULL_ (1) bool 1722 process_all_events_and_data (struct MHD_Daemon *restrict d) 1723 { 1724 switch (d->events.poll_type) 1725 { 1726 case mhd_POLL_TYPE_EXT: 1727 mhd_assert (mhd_WM_INT_HAS_EXT_EVENTS (d->wmode_int)); 1728 if (! ext_events_process_net_updates_and_resume_conn (d)) 1729 return false; 1730 break; 1731 #ifdef MHD_SUPPORT_SELECT 1732 case mhd_POLL_TYPE_SELECT: 1733 if (! 
          get_all_net_updates_by_select_and_resume_conn (d, false))
      return false;
    break;
#endif /* MHD_SUPPORT_SELECT */
#ifdef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
    if (! get_all_net_updates_by_poll (d, false))
      return false;
    daemon_resume_conns_if_needed (d);
    break;
#endif /* MHD_SUPPORT_POLL */
#ifdef MHD_SUPPORT_EPOLL
  case mhd_POLL_TYPE_EPOLL:
    if (! get_all_net_updates_by_epoll (d))
      return false;
    daemon_resume_conns_if_needed (d);
    break;
#endif /* MHD_SUPPORT_EPOLL */
#ifndef MHD_SUPPORT_SELECT
  case mhd_POLL_TYPE_SELECT:
#endif /* ! MHD_SUPPORT_SELECT */
#ifndef MHD_SUPPORT_POLL
  case mhd_POLL_TYPE_POLL:
#endif /* ! MHD_SUPPORT_POLL */
  case mhd_POLL_TYPE_NOT_SET_YET:
  default:
    mhd_UNREACHABLE ();
    MHD_PANIC ("Daemon data integrity broken");
    break;
  }

  /* Accept new connections only when allowed */
  if (d->events.accept_pending && ! d->conns.block_new)
    d->events.accept_pending = ! daemon_accept_new_conns (d);

  daemon_process_all_active_conns (d);
  daemon_cleanup_upgraded_conns (d);
  return ! mhd_D_HAS_STOP_REQ (d);
}


/**
 * Process registered external events for the daemon.
 * @param daemon the daemon to use
 * @param next_max_wait the optional pointer to receive the next maximum
 *                      wait time, can be NULL
 * @return #MHD_SC_OK on success, error code otherwise
 */
static
MHD_FN_PAR_NONNULL_ (1) enum MHD_StatusCode
process_reg_events_int (struct MHD_Daemon *MHD_RESTRICT daemon,
                        uint_fast64_t *MHD_RESTRICT next_max_wait)
{
  enum MHD_StatusCode res;

  if (mhd_DAEMON_STATE_STARTED > daemon->state)
    return MHD_SC_TOO_EARLY;
  if (! mhd_WM_INT_HAS_EXT_EVENTS (daemon->wmode_int))
    return MHD_SC_EXTERNAL_EVENT_ONLY;
  if (mhd_DAEMON_STATE_STARTED < daemon->state)
    return MHD_SC_TOO_LATE;

#ifdef MHD_SUPPORT_THREADS
  if (daemon->events.data.extr.itc_data.is_broken)
    return MHD_SC_DAEMON_SYS_DATA_BROKEN;
#endif /* MHD_SUPPORT_THREADS */

  if (daemon->net.listen.is_broken)
    return MHD_SC_DAEMON_SYS_DATA_BROKEN;

  /* Ignore returned value */
  (void) process_all_events_and_data (daemon);

  if (NULL != next_max_wait)
    *next_max_wait = MHD_WAIT_INDEFINITELY;

  res = ext_events_update_registrations (daemon);
  if (MHD_SC_OK != res)
    return res;

#ifdef MHD_SUPPORT_THREADS
  if (daemon->events.data.extr.itc_data.is_broken)
  {
    log_itc_broken (daemon);
    return MHD_SC_DAEMON_SYS_DATA_BROKEN;
  }
#endif /* MHD_SUPPORT_THREADS */

  if (daemon->net.listen.is_broken)
  {
    log_listen_broken (daemon);
    return MHD_SC_DAEMON_SYS_DATA_BROKEN;
  }

  if (NULL != next_max_wait)
    *next_max_wait = mhd_daemon_get_wait_max (daemon);

  return MHD_SC_OK;
}


MHD_EXTERN_
MHD_FN_PAR_NONNULL_ (1) enum MHD_StatusCode
MHD_daemon_process_reg_events (struct MHD_Daemon *MHD_RESTRICT daemon,
                               uint_fast64_t *MHD_RESTRICT next_max_wait)
{
  enum MHD_StatusCode res;
#ifdef mhd_DEBUG_POLLING_FDS
  fprintf (stderr,
           "### (Starting) MHD_daemon_process_reg_events(daemon, [%s])...\n",
           (NULL != next_max_wait) ?
           "non-NULL" : "NULL");
#endif
  res = process_reg_events_int (daemon,
                                next_max_wait);
#ifdef mhd_DEBUG_POLLING_FDS
  if (NULL == next_max_wait)
    fprintf (stderr,
             "### (Finished) MHD_daemon_process_reg_events(daemon, [NULL]) ->"
             "%u\n",
             (unsigned int) res);
  else if (MHD_WAIT_INDEFINITELY == *next_max_wait)
    fprintf (stderr,
             "### (Finished) MHD_daemon_process_reg_events(daemon, "
             "->MHD_WAIT_INDEFINITELY) ->%u\n",
             (unsigned int) res);
  else
    fprintf (stderr,
             "### (Finished) MHD_daemon_process_reg_events(daemon, ->%llu) "
             "->%u\n",
             (unsigned long long) *next_max_wait,
             (unsigned int) res);
#endif
  return res;
}


#ifdef MHD_SUPPORT_THREADS

/**
 * The entry point for the daemon worker thread
 * @param cls the closure
 */
mhd_THRD_RTRN_TYPE mhd_THRD_CALL_SPEC
mhd_worker_all_events (void *cls)
{
  struct MHD_Daemon *const restrict d = (struct MHD_Daemon *) cls;
  mhd_thread_handle_ID_set_current_thread_ID (&(d->threading.tid));
  mhd_assert (d->dbg.net_inited);
  mhd_assert (! d->dbg.net_deinited);
  mhd_assert (mhd_D_TYPE_IS_VALID (d->threading.d_type));
  mhd_assert (mhd_D_TYPE_HAS_EVENTS_PROCESSING (d->threading.d_type));
  mhd_assert (mhd_DAEMON_TYPE_LISTEN_ONLY != d->threading.d_type);
  mhd_assert (! mhd_D_TYPE_HAS_WORKERS (d->threading.d_type));
  mhd_assert (mhd_WM_INT_INTERNAL_EVENTS_THREAD_PER_CONNECTION != d->wmode_int);
  mhd_assert (d->dbg.events_fully_inited);
  mhd_assert (d->dbg.connections_inited);

#ifdef mhd_HAVE_MHD_THREAD_BLOCK_SIGPIPE
  // TODO: store and use the result
  (void) mhd_thread_block_sigpipe ();
#endif

  /* Run events processing rounds until stop is requested or the daemon
     gets broken */
  while (! d->threading.stop_requested)
  {
    if (! process_all_events_and_data (d))
      break;
  }
  if (! d->threading.stop_requested)
  {
    mhd_LOG_MSG (d, MHD_SC_DAEMON_THREAD_STOP_UNEXPECTED, \
                 "The daemon thread is stopping, but termination has not " \
                 "been requested for the daemon.");
  }
  mhd_daemon_close_all_conns (d);

#ifdef MHD_SUPPORT_HTTPS
  if (mhd_D_HAS_TLS (d))
    mhd_tls_thread_cleanup (d->tls);
#endif /* MHD_SUPPORT_HTTPS */

  return (mhd_THRD_RTRN_TYPE) 0;
}


/**
 * Process events for the listening socket and the ITC only.
 * @param d the daemon to use
 * @return 'true' if processed successfully,
 *         'false' if the daemon thread must be stopped
 */
static MHD_FN_PAR_NONNULL_ (1) bool
process_listening_and_itc_only (struct MHD_Daemon *restrict d)
{
  if (false)
    (void) 0;
#ifdef MHD_SUPPORT_SELECT
  else if (mhd_POLL_TYPE_SELECT == d->events.poll_type)
  {
    return false; // TODO: implement
  }
#endif /* MHD_SUPPORT_SELECT */
#ifdef MHD_SUPPORT_POLL
  else if (mhd_POLL_TYPE_POLL == d->events.poll_type)
  {
    if (! get_all_net_updates_by_poll (d, true))
      return false;
  }
#endif /* MHD_SUPPORT_POLL */
  else
  {
    (void) d; /* Mute compiler warning */
    mhd_assert (0 && "Impossible value");
    mhd_UNREACHABLE ();
    MHD_PANIC ("Daemon data integrity broken");
  }
  // TODO: Accept connections
  return false;
}


/**
 * The entry point for the daemon listening thread
 * @param cls the closure
 */
mhd_THRD_RTRN_TYPE mhd_THRD_CALL_SPEC
mhd_worker_listening_only (void *cls)
{
  struct MHD_Daemon *const restrict d = (struct MHD_Daemon *) cls;
  mhd_thread_handle_ID_set_current_thread_ID (&(d->threading.tid));

  mhd_assert (d->dbg.net_inited);
  mhd_assert (!
              d->dbg.net_deinited);
  mhd_assert (mhd_DAEMON_TYPE_LISTEN_ONLY == d->threading.d_type);
  mhd_assert (mhd_WM_INT_INTERNAL_EVENTS_THREAD_PER_CONNECTION == d->wmode_int);
  mhd_assert (d->dbg.events_fully_inited);
  mhd_assert (d->dbg.connections_inited);

#ifdef mhd_HAVE_MHD_THREAD_BLOCK_SIGPIPE
  // TODO: store and use the result
  (void) mhd_thread_block_sigpipe ();
#endif

  /* Monitor only the listening socket and the ITC; connection sockets
     are handled by their own per-connection threads */
  while (! d->threading.stop_requested)
  {
    if (! process_listening_and_itc_only (d))
      break;
  }
  if (! d->threading.stop_requested)
  {
    mhd_LOG_MSG (d, MHD_SC_DAEMON_THREAD_STOP_UNEXPECTED, \
                 "The daemon thread is stopping, but termination has " \
                 "not been requested by the daemon.");
  }

#ifdef MHD_SUPPORT_HTTPS
  if (mhd_D_HAS_TLS (d))
    mhd_tls_thread_cleanup (d->tls);
#endif /* MHD_SUPPORT_HTTPS */

  return (mhd_THRD_RTRN_TYPE) 0;
}


/**
 * The entry point for the per-connection worker thread.
 * Not implemented yet.
 * @param cls the closure
 */
mhd_THRD_RTRN_TYPE mhd_THRD_CALL_SPEC
mhd_worker_connection (void *cls)
{
  if (cls) // TODO: Implement
    MHD_PANIC ("Not yet implemented");

#if 0 // def MHD_SUPPORT_HTTPS
  if (mhd_D_HAS_TLS (d))
    mhd_tls_thread_cleanup (d->tls);
#endif /* MHD_SUPPORT_HTTPS */

  return (mhd_THRD_RTRN_TYPE) 0;
}


#endif /* MHD_SUPPORT_THREADS */