h2_reply_funcs.c (18048B)
/* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */
/*
  This file is part of GNU libmicrohttpd.
  Copyright (C) 2025 Evgeny Grin (Karlson2k)

  GNU libmicrohttpd is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  GNU libmicrohttpd is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  Alternatively, you can redistribute GNU libmicrohttpd and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of
  the License, or (at your option) any later version, together
  with the eCos exception, as follows:

    As a special exception, if other files instantiate templates or
    use macros or inline functions from this file, or you compile this
    file and link it with other works to produce a work based on this
    file, this file does not by itself cause the resulting work to be
    covered by the GNU General Public License. However the source code
    for this file must still be made available in accordance with
    section (3) of the GNU General Public License v2.

    This exception does not invalidate any other reasons why a work
    based on this file might be covered by the GNU General Public
    License.

  You should have received copies of the GNU Lesser General Public
  License and the GNU General Public License along with this library;
  if not, see <https://www.gnu.org/licenses/>.
*/

/**
 * @file src/mhd2/h2/h2_reply_funcs.c
 * @brief Definitions of HTTP/2 reply sending functions
 * @author Karlson2k (Evgeny Grin)
 */

#include "mhd_sys_options.h"

#include "sys_bool_type.h"
#include "sys_base_types.h"

#include "mhd_str_macros.h"

#include "mhd_constexpr.h"

#include "mhd_assert.h"
#include "mhd_unreachable.h"

#include "mhd_buffer.h"
#include "mhd_response.h"
#include "mhd_connection.h"
#include "mhd_daemon.h"

#include "mhd_str.h"
#include "mhd_read_file.h"

#include "stream_process_reply.h"

#include "h2_conn_data.h"
#include "h2_stream_data.h"

#include "h2_frame_init.h"
#include "h2_proc_conn.h"
#include "h2_proc_out.h"

#include "h2_frame_codec.h"

#include "hpack/mhd_hpack_codec.h"


#include "h2_reply_funcs.h"

struct mhd_H2Stream; /* Forward declaration */

/* Local wrapper around mhd_hpack_enc_field() that maps the detailed
   result code to a simple success flag */
mhd_static_inline MHD_FN_PAR_NONNULL_ALL_
MHD_FN_PAR_INOUT_ (1) MHD_FN_PAR_IN_ (2) MHD_FN_PAR_IN_ (3)
MHD_FN_PAR_OUT_SIZE_ (6,5) MHD_FN_PAR_OUT_ (7) bool
enc_field (struct mhd_HpackEncContext *restrict hk_enc,
           const struct mhd_BufferConst *restrict name,
           const struct mhd_BufferConst *restrict value,
           enum mhd_HpackEncPolicy enc_pol,
           const size_t out_buff_size,
           uint8_t *restrict out_buff,
           size_t *restrict bytes_encoded)
{
  enum mhd_HpackEncResult enc_res;

  enc_res = mhd_hpack_enc_field (hk_enc,
                                 name,
                                 value,
                                 enc_pol,
                                 out_buff_size,
                                 out_buff,
                                 bytes_encoded);

  mhd_assert (mhd_HPACK_ENC_RES_ALLOC_ERR != enc_res);

  return (mhd_HPACK_ENC_RES_OK == enc_res);
}


/**
 * Encode the reply header fields with HPACK into the provided payload
 * buffer.
 *
 * Encoding resumes after the last field already encoded (as tracked by the
 * number of sent fields), so the function can be called again to continue
 * in the next frame.
 * @param s the stream to use
 * @param pl the payload buffer to fill
 * @param[out] fields_complete set to 'true' when all fields have been encoded
 * @return the number of bytes of @a pl used
 */
static MHD_FN_PAR_NONNULL_ALL_ size_t
stream_headers_encode (struct mhd_H2Stream *restrict s,
                       struct mhd_Buffer *restrict pl,
                       bool *restrict fields_complete)
{
  struct mhd_HpackEncContext *const hk_enc = &(s->c->h2.hk_enc);
  struct MHD_Response *const r = s->rpl.response;
  uint8_t *restrict buff = (uint8_t *) pl->data;
  size_t pos;
  size_t pos_incr;
  size_t fld_num;
  enum mhd_HpackEncResult enc_res;
  struct mhd_ResponseHeader *hdr;

  *fields_complete = false; /* Could be updated at the end */
  pos = 0u;
  fld_num = 0u;

  /* Pseudo-header */
  if (fld_num >= s->rpl.fields.num_sent)
  {
    enc_res = mhd_hpack_enc_ph_status (hk_enc,
                                       (uint_fast16_t) s->rpl.response->sc,
                                       mhd_HPACK_ENC_PFS_POL_NORMAL,
                                       pl->size - pos,
                                       buff + pos,
                                       &pos_incr);
    mhd_assert (mhd_HPACK_ENC_RES_ALLOC_ERR != enc_res);
    if (mhd_HPACK_ENC_RES_OK != enc_res)
      return pos;

    pos += pos_incr;
    ++(s->rpl.fields.num_sent);
  }
  ++fld_num;

  /* "date" header */

  if ( (! r->cfg.has_hdr_date) &&
       (! s->c->daemon->req_cfg.suppress_date) )
  {
    if (fld_num >= s->rpl.fields.num_sent)
    {
      char val_buff[30];
      if (mhd_build_date_str (val_buff))
      {
        static const struct mhd_BufferConst hdr_name =
          mhd_MSTR_INIT ("date");
        struct mhd_BufferConst hdr_val;

        hdr_val.data = val_buff;
        hdr_val.size = 29u;

        if (! enc_field (hk_enc,
                         &hdr_name,
                         &hdr_val,
                         1 >= s->c->h2.streams.num_streams ?
                         mhd_HPACK_ENC_POL_LOW_PRIO : mhd_HPACK_ENC_POL_NEUTRAL,
                         pl->size - pos,
                         buff + pos,
                         &pos_incr))
          return pos;

        pos += pos_incr;
        ++(s->rpl.fields.num_sent);
      }
    }
    ++fld_num;
  }

  /* "content-length" header */
  if (s->rpl.fields.auto_cntn_len)
  {
    if (fld_num >= s->rpl.fields.num_sent)
    {
      static const struct mhd_BufferConst hdr_name =
        mhd_MSTR_INIT ("content-length");
      char val_buff[21]; /* Maximum supported value is 18446744073709551615 */
      struct mhd_BufferConst hdr_val;

      mhd_assert (MHD_SIZE_UNKNOWN > r->cntn_size);
      hdr_val.data = val_buff;
      hdr_val.size = mhd_uint64_to_str (r->cntn_size,
                                        val_buff,
                                        sizeof(val_buff));
      mhd_assert (0u != hdr_val.size);

      if (! enc_field (hk_enc,
                       &hdr_name,
                       &hdr_val,
                       r->reuse.reusable ?
                       mhd_HPACK_ENC_POL_NEUTRAL : mhd_HPACK_ENC_POL_LOW_PRIO,
                       pl->size - pos,
                       buff + pos,
                       &pos_incr))
        return pos;

      pos += pos_incr;
      ++(s->rpl.fields.num_sent);
    }
    ++fld_num;
  }

  /* User headers */

  for (hdr = mhd_DLINKEDL_GET_FIRST (r, headers);
       NULL != hdr;
       hdr = mhd_DLINKEDL_GET_NEXT (hdr, headers))
  {
    if (NULL == hdr->h2.name.data)
      continue; /* The header is HTTP/1.x only */

    if (fld_num >= s->rpl.fields.num_sent)
    {
      if (! enc_field (hk_enc,
                       &(hdr->h2.name),
                       &(hdr->h2.value),
                       mhd_HPACK_ENC_POL_NEUTRAL,
                       pl->size - pos,
                       buff + pos,
                       &pos_incr))
        return pos;
      pos += pos_incr;
      ++(s->rpl.fields.num_sent);
    }
    ++fld_num;
  }

  *fields_complete = true;
  return pos;
}
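

/**
 * Encode the reply header fields into a HEADERS or CONTINUATION frame and
 * enqueue the frame in the connection output buffer.
 *
 * A HEADERS frame is used when no field has been sent yet for this reply,
 * otherwise a CONTINUATION frame is used.  As many fields as fit into the
 * acquired buffer are encoded; the reply stage is advanced when the last
 * field has been encoded.
 * @param s the stream to use
 * @return 'true' if a frame with at least one field has been enqueued,
 *         'false' if nothing could be encoded; the stream is marked as
 *         broken when even a maximum size frame could not hold a single
 *         field
 */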
static MHD_FN_PAR_NONNULL_ALL_ bool
stream_headers_send (struct mhd_H2Stream *s)
{
  union mhd_H2FrameUnion h2frame;
  struct mhd_H2FrameHeadersInfo *hdrs;
  struct mhd_H2FrameContinuationInfo *cont;
  struct mhd_Buffer buff;
  struct mhd_Buffer payload;
  size_t payload_offset;
  bool *complete_header;
  size_t payload_used;

  mhd_assert (mhd_H2_RPL_STAGE_HEADERS_INCOMPLETE == s->rpl.stage);

  if (0u == s->rpl.fields.num_sent)
  {
    hdrs = mhd_h2_frame_init_headers (&h2frame,
                                      s->stream_id,
                                      false, /* could be updated below */
                                      ! s->rpl.send_content);
    cont = NULL;
    complete_header = &(hdrs->end_headers);
  }
  else
  {
    hdrs = NULL;
    cont = mhd_h2_frame_init_continuation (&h2frame,
                                           s->stream_id,
                                           false); /* could be updated below */
    complete_header = &(cont->end_headers);
  }

  if (! mhd_h2_out_buff_acquire_fr_w_payload (s->c,
                                              &h2frame,
                                              &buff,
                                              &payload_offset))
    return false;

  payload.data = buff.data + payload_offset;
  payload.size = buff.size - payload_offset;

  payload_used = stream_headers_encode (s,
                                        &payload,
                                        complete_header);

  if (0u != payload_used)
  {
    const size_t full_fr_size = mhd_h2_frame_set_payload_size (&h2frame,
                                                               payload_used);
    const size_t final_fr_hdr_size =
      mhd_h2_frame_hdr_encode (&h2frame,
                               payload_offset,
                               (uint8_t*) buff.data);
    mhd_assert (payload_offset == final_fr_hdr_size);
    (void) final_fr_hdr_size;

    mhd_h2_out_buff_unlock (s->c,
                            full_fr_size);
    if (*complete_header)
    {
      s->rpl.stage = s->rpl.send_content ?
                     mhd_H2_RPL_STAGE_HEADERS_COMPLETE :
                     mhd_H2_RPL_STAGE_END_STREAM;
    }
    return true; /* Success exit point */
  }

  mhd_h2_out_buff_unlock (s->c,
                          0u);

  if (((s->c->write_buffer_size - s->c->write_buffer_append_offset) >=
       mhd_H2_FR_HDR_BASE_SIZE + s->c->h2.peer.max_frame_size) ||
      (0u == s->c->write_buffer_append_offset))
  {
    /* The output buffer has room for a maximum size frame, yet not a single
       header field could be added.  There is no point in waiting longer:
       the response header is too large to be sent over this connection. */
    s->state.mhd_err = mhd_H2_ERR_INTERNAL_ERROR;
    s->rpl.stage = mhd_H2_RPL_STAGE_BROKEN;
    return false;
  }

  return false;
}
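

/**
 * Copy response content data from the IoVec elements into the buffer.
 * @param r the response with IoVec content data
 * @param offset the offset in the response content to start copying from
 * @param buff_size the size of @a buff
 * @param[out] buff the buffer to fill with the content data
 * @param[out] written the number of bytes copied to @a buff
 * @return 'true' if some data has been copied,
 *         'false' if @a offset is beyond the available content or cannot
 *         be handled
 */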
static MHD_FN_PAR_NONNULL_ALL_
MHD_FN_PAR_OUT_SIZE_ (4,3)
MHD_FN_PAR_OUT_ (5) bool
content_read_iovec (struct MHD_Response *restrict r,
                    uint_fast64_t offset,
                    size_t buff_size,
                    uint8_t *restrict buff,
                    size_t *restrict written)
{
  size_t i;
  uint_fast64_t skipped;
  const mhd_iovec *const restrict iov = r->cntn.iovec.iov;

  mhd_assert (mhd_RESPONSE_CONTENT_DATA_IOVEC == r->cntn_dtype);

  skipped = 0u;

  /* Skip the elements that are entirely before the requested offset */
  for (i = 0u; r->cntn.iovec.cnt > i; ++i)
  {
    if (skipped + iov[i].iov_len > offset)
      break;
    skipped += iov[i].iov_len;
    mhd_assert (skipped >= iov[i].iov_len);
  }

  if (r->cntn.iovec.cnt == i)
    return false;

  /* Copy the (possibly partial) first element */
  if (1)
  {
    size_t elmnt_copy;
    const size_t elmnt_off = (size_t) (offset - skipped);

    if (elmnt_off != (offset - skipped))
      return false;

    mhd_assert (0u != iov[i].iov_len);

    elmnt_copy = (size_t) (iov[i].iov_len - elmnt_off);
    if (buff_size < elmnt_copy)
      elmnt_copy = buff_size;

    memcpy (buff,
            ((const uint8_t *) iov[i].iov_base) + elmnt_off,
            elmnt_copy);
    *written = elmnt_copy;

    if (elmnt_copy == buff_size)
      return true;

    ++i;
  }

  /* Copy the following elements until the buffer is full or the data ends */
  for ((void) i; r->cntn.iovec.cnt > i; ++i)
  {
    mhd_assert (0u != iov[i].iov_len);
    if ((buff_size - *written) <= iov[i].iov_len)
    {
      memcpy (buff + *written,
              iov[i].iov_base,
              buff_size - *written);
      *written = buff_size;
      return true;
    }
    memcpy (buff + *written,
            iov[i].iov_base,
            (size_t) iov[i].iov_len);
    *written += (size_t) iov[i].iov_len;
    mhd_assert (*written > iov[i].iov_len);
  }
  return true;
}


/**
 * Read response content data from the response file into the buffer.
 * @param r the response with file content data
 * @param offset the offset in the response content to read from
 * @param buff_size the size of @a buff
 * @param[out] buff the buffer to fill with the content data
 * @param[out] written the number of bytes read into @a buff
 * @return 'true' if the read succeeded,
 *         'false' otherwise
 */
static MHD_FN_PAR_NONNULL_ALL_
MHD_FN_PAR_OUT_SIZE_ (4,3)
MHD_FN_PAR_OUT_ (5) bool
content_read_file (struct MHD_Response *restrict r,
                   uint_fast64_t offset,
                   size_t buff_size,
                   uint8_t *restrict buff,
                   size_t *restrict written)
{
  uint_fast64_t file_off;

  mhd_assert (mhd_RESPONSE_CONTENT_DATA_FILE == r->cntn_dtype);
  // TODO: support pipe reading without position
  mhd_assert (! r->cntn.file.is_pipe);

  file_off = offset + r->cntn.file.offset;
  if (file_off < offset)
    return false; /* Offset too large */

  return (mhd_FILE_READ_OK ==
          mhd_read_file (r->cntn.file.fd,
                         file_off,
                         buff_size,
                         (char*) buff,
                         written));
}


/* The minimum DATA frame payload size worth sending, unless the payload
   carries the final part of the content */
mhd_constexpr uint_least32_t min_size_for_data = 128u;
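

/**
 * Build and enqueue a DATA frame with the next portion of the response
 * content.
 *
 * The payload size is limited by the connection-level and the stream-level
 * send windows and by the amount of content left.  Sending is postponed
 * when the resulting payload would be smaller than #min_size_for_data,
 * unless it carries the final part of the content.
 * @param s the stream to use
 * @return 'true' if a DATA frame has been enqueued,
 *         'false' if sending has been postponed or failed (in the latter
 *         case the stream is marked as broken)
 */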
static MHD_FN_PAR_NONNULL_ALL_ bool
stream_content_send (struct mhd_H2Stream *s)
{
  struct MHD_Response *const r = s->rpl.response;
  union mhd_H2FrameUnion h2frame;
  struct mhd_H2FrameDataInfo *dat;
  struct mhd_Buffer buff;
  uint8_t *pld_buff;
  size_t pld_buff_size;
  size_t cntnt_left;
  size_t payload_offset;
  size_t payload_used;
  int_least32_t wndw_limit;
  uint_least32_t full_payload_limit;

  mhd_assert (mhd_H2_RPL_STAGE_HEADERS_COMPLETE == s->rpl.stage);
  mhd_assert (s->rpl.send_content);
  mhd_assert (0u != r->cntn_size);
  mhd_assert (! r->cfg.head_only);

  /* The payload is limited by the smaller of the connection-level and
     the stream-level send windows */
  if (s->c->h2.state.send_window < s->state.send_window)
    wndw_limit = s->c->h2.state.send_window;
  else
    wndw_limit = s->state.send_window;

  if (0 >= wndw_limit)
    return false; /* The peer should increment window(s) first */

  full_payload_limit = (uint_least32_t) wndw_limit;
  if (MHD_SIZE_UNKNOWN != r->cntn_size)
  {
    cntnt_left = r->cntn_size - s->rpl.cntn_read_pos;
    if (cntnt_left < full_payload_limit)
      full_payload_limit = (uint_least32_t) cntnt_left;
  }
  else
    cntnt_left = MHD_SIZE_UNKNOWN;

  if ((min_size_for_data > full_payload_limit)
      && (cntnt_left != full_payload_limit))
    return false;

  dat = mhd_h2_frame_init_data (&h2frame,
                                s->stream_id,
                                false); /* could be updated below */

  if (! mhd_h2_out_buff_acquire_fr_w_payload_l (s->c,
                                                &h2frame,
                                                full_payload_limit,
                                                &buff,
                                                &payload_offset))
    return false;

  pld_buff = (uint8_t*) buff.data + payload_offset;
  pld_buff_size = buff.size - payload_offset;
  mhd_assert (mhd_H2_FR_HDR_BASE_SIZE < pld_buff_size);

  mhd_assert (r->cntn_size > s->rpl.cntn_read_pos);

  payload_used = 0u;
  switch (r->cntn_dtype)
  {
  case mhd_RESPONSE_CONTENT_DATA_BUFFER:
    payload_used = (size_t) full_payload_limit;
    memcpy (pld_buff,
            r->cntn.buf + s->rpl.cntn_read_pos,
            payload_used);
    break;
  case mhd_RESPONSE_CONTENT_DATA_IOVEC:
    if (! content_read_iovec (r,
                              s->rpl.cntn_read_pos,
                              pld_buff_size,
                              pld_buff,
                              &payload_used))
      payload_used = 0u;
    break;
  case mhd_RESPONSE_CONTENT_DATA_FILE:
    if (! content_read_file (r,
                             s->rpl.cntn_read_pos,
                             pld_buff_size,
                             pld_buff,
                             &payload_used))
      payload_used = 0u;
    break;
  case mhd_RESPONSE_CONTENT_DATA_CALLBACK:
    s->rpl.stage = mhd_H2_RPL_STAGE_BROKEN;
    break;
  case mhd_RESPONSE_CONTENT_DATA_INVALID:
  default:
    mhd_UNREACHABLE ();
    s->rpl.stage = mhd_H2_RPL_STAGE_BROKEN;
    break;
  }

  dat->end_stream = (payload_used + s->rpl.cntn_read_pos == r->cntn_size);

  if (0u != payload_used)
  {
    const size_t full_fr_size = mhd_h2_frame_set_payload_size (&h2frame,
                                                               payload_used);
    const size_t final_fr_hdr_size =
      mhd_h2_frame_hdr_encode (&h2frame,
                               payload_offset,
                               (uint8_t*) buff.data);
    mhd_assert (payload_offset == final_fr_hdr_size);
    (void) final_fr_hdr_size;

    mhd_h2_out_buff_unlock (s->c,
                            full_fr_size);
    /* Account the sent payload against both send windows */
    s->c->h2.state.send_window -=
      (int_least32_t) (full_fr_size - mhd_H2_FR_HDR_BASE_SIZE);
    mhd_assert (0 <= s->c->h2.state.send_window);
    s->state.send_window -=
      (int_least32_t) (full_fr_size - mhd_H2_FR_HDR_BASE_SIZE);
    mhd_assert (0 <= s->state.send_window);

    return true; /* Success exit point */
  }

  mhd_h2_out_buff_unlock (s->c,
                          0u);

  s->state.mhd_err = mhd_H2_ERR_INTERNAL_ERROR;
  s->rpl.stage = mhd_H2_RPL_STAGE_BROKEN;

  return false;
}
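

/*
 * Worked example of the payload clamping in stream_content_send() above
 * (the numbers are purely illustrative): with a connection send window of
 * 65535, a stream send window of 100 and 5000 bytes of content left, the
 * payload limit is clamped to 100 bytes; since 100 is below
 * min_size_for_data (128) and does not complete the content, sending is
 * postponed until the peer enlarges the window(s).  If only 100 bytes of
 * content were left, the same 100-byte payload would be sent, as it
 * completes the content.
 */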


/**
 * Send (a portion of) the reply for the stream: the response header fields
 * first and then, when the fields are complete, the response content.
 * @param s the stream to use
 * @return 'true' if at least one frame has been enqueued,
 *         'false' if no progress could be made
 */
MHD_INTERNAL MHD_FN_PAR_NONNULL_ALL_ bool
mhd_h2_stream_reply_send (struct mhd_H2Stream *s)
{
  mhd_assert (s->is_h2);
  mhd_assert (mhd_H2_RPL_STAGE_END_STREAM != s->rpl.stage);
  mhd_assert (mhd_H2_RPL_STAGE_BROKEN != s->rpl.stage);

  if (mhd_H2_RPL_STAGE_HEADERS_INCOMPLETE == s->rpl.stage)
  {
    if (! mhd_hpack_enc_dyn_resize (&(s->c->h2.hk_enc)))
    {
      /* Ignore a failure of the next call: the connection and the stream
         will be retried in the next round if the connection is not
         aborted. */
      mhd_h2_conn_finish (s->c,
                          mhd_H2_ERR_INTERNAL_ERROR,
                          false);
      return false;
    }

    if (! stream_headers_send (s))
      return false;

    if ((mhd_H2_RPL_STAGE_HEADERS_COMPLETE == s->rpl.stage) &&
        (mhd_RESPONSE_CONTENT_DATA_FILE <= s->rpl.response->cntn_dtype))
      return true; /* Do not combine with content sending as the data is
                      not ready yet */
  }

  if (mhd_H2_RPL_STAGE_HEADERS_COMPLETE == s->rpl.stage)
  {
    if (! stream_content_send (s))
      return false;
  }

  return true;
}