aboutsummaryrefslogtreecommitdiff
path: root/src/microhttpd/memorypool.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/microhttpd/memorypool.c')
-rw-r--r--src/microhttpd/memorypool.c103
1 files changed, 103 insertions, 0 deletions
diff --git a/src/microhttpd/memorypool.c b/src/microhttpd/memorypool.c
index e0511830..8d80f888 100644
--- a/src/microhttpd/memorypool.c
+++ b/src/microhttpd/memorypool.c
@@ -577,6 +577,109 @@ MHD_pool_reallocate (struct MemoryPool *pool,
577 577
578 578
/**
 * Deallocate a block of memory obtained from the pool.
 *
 * If the given block is not the most recently
 * (re)allocated block, the memory of this block
 * may not be released until the pool is
 * destroyed or reset.
 *
 * The region is always zeroed out (and ASan-poisoned when enabled) so
 * stale data cannot leak through later allocations from the same pool.
 *
 * @param pool memory pool to use for the operation
 * @param block the allocated block, the NULL is tolerated
 * @param block_size the size of the allocated block
 */
void
MHD_pool_deallocate (struct MemoryPool *pool,
                     void *block,
                     size_t block_size)
{
  /* Debug-build invariants: 'pos' grows from the front, 'end' shrinks from
     the back, and 'pos' is always kept aligned. */
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (block != NULL || block_size == 0);
  mhd_assert (pool->size >= block_size);
  mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));

  if (NULL != block)
  { /* Have previously allocated data */
    /* Offset of the block from the start of the pool's memory area;
       mp_ptr_diff_ is presumably a UB-safe pointer-difference helper —
       the asserts below require the block to lie inside the pool. */
    const size_t block_offset = mp_ptr_diff_ (block, pool->memory);
    mhd_assert (mp_ptr_le_ (pool->memory, block));
    mhd_assert (block_offset <= pool->size);
    mhd_assert ((block_offset != pool->pos) || (block_size == 0));
    /* Zero-out deallocated region */
    if (0 != block_size)
    {
      memset (block, 0, block_size);
      _MHD_POISON_MEMORY (block, block_size);
    }
#if ! defined(MHD_FAVOR_SMALL_CODE) && ! defined(MHD_ASAN_POISON_ACTIVE)
    /* Fast path: with zero size there is no space to reclaim.  When
       favouring small code or when ASan poisoning is active this early
       return is omitted and the generic logic below handles it. */
    else
      return; /* Zero size, no need to do anything */
#endif /* ! MHD_FAVOR_SMALL_CODE && ! MHD_ASAN_POISON_ACTIVE */
    if (block_offset <= pool->pos)
    {
      /* "Normal" block, not allocated "from the end". */
      const size_t alg_end =
        ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size);
      mhd_assert (alg_end <= pool->pos);
      if (alg_end == pool->pos)
      {
        /* The last allocated block, return deallocated block to the pool
           by rolling 'pos' back to the (aligned) start of the block. */
        size_t alg_start = ROUND_TO_ALIGN (block_offset);
        mhd_assert (alg_start >= block_offset);
#if defined(MHD_ASAN_POISON_ACTIVE)
        if (alg_start != block_offset)
        {
          /* Block start was not aligned: poison the alignment padding
             between the raw offset and the rounded-up start. */
          _MHD_POISON_MEMORY (pool->memory + block_offset, \
                              alg_start - block_offset);
        }
        else if (0 != alg_start)
        {
          /* Block start was aligned and not at the very beginning of the
             pool: make sure a red zone separates it from the previous
             allocation, inserting one here if none exists yet. */
          bool need_red_zone_before;
          mhd_assert (_MHD_RED_ZONE_SIZE <= alg_start);
#if defined(HAVE___ASAN_REGION_IS_POISONED)
          /* NULL return means the whole region just before the block is
             unpoisoned, i.e. no red zone is present there. */
          need_red_zone_before =
            (NULL == __asan_region_is_poisoned (pool->memory
                                                + alg_start
                                                - _MHD_RED_ZONE_SIZE,
                                                _MHD_RED_ZONE_SIZE));
#elif defined(HAVE___ASAN_ADDRESS_IS_POISONED)
          /* Weaker probe: check only the single byte just before the
             block; 0 means that byte is not poisoned. */
          need_red_zone_before =
            (0 == __asan_address_is_poisoned (pool->memory + alg_start - 1));
#else  /* ! HAVE___ASAN_ADDRESS_IS_POISONED */
          need_red_zone_before = true; /* Unknown, assume new red zone needed */
#endif /* ! HAVE___ASAN_ADDRESS_IS_POISONED */
          if (need_red_zone_before)
          {
            /* Carve the red zone out of the freed space itself, so the
               rolled-back 'pos' stays past the poisoned bytes. */
            _MHD_POISON_MEMORY (pool->memory + alg_start, _MHD_RED_ZONE_SIZE);
            alg_start += _MHD_RED_ZONE_SIZE;
          }
        }
#endif /* MHD_ASAN_POISON_ACTIVE */
        mhd_assert (alg_start <= pool->pos);
        mhd_assert (alg_start == ROUND_TO_ALIGN (alg_start));
        pool->pos = alg_start;
      }
      /* else: not the most recent "normal" allocation — space is
         reclaimed only on pool reset/destroy (see function contract). */
    }
    else
    {
      /* Allocated "from the end" block. */
      /* The size and the pointers of such block should not be manipulated by
         MHD code (block split is disallowed). */
      mhd_assert (block_offset >= pool->end);
      mhd_assert (ROUND_TO_ALIGN (block_offset) == block_offset);
      if (block_offset == pool->end)
      {
        /* The last allocated block, return deallocated block to the pool
           by moving 'end' forward past the freed block (keeping the
           alignment/red-zone rounding consistent with allocation). */
        const size_t alg_end =
          ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size);
        pool->end = alg_end;
      }
      /* else: not the most recent end-block — reclaimed on reset only. */
    }
  }
}
680
681
682/**
580 * Clear all entries from the memory pool except 683 * Clear all entries from the memory pool except
581 * for @a keep of the given @a copy_bytes. The pointer 684 * for @a keep of the given @a copy_bytes. The pointer
582 * returned should be a buffer of @a new_size where 685 * returned should be a buffer of @a new_size where