libmicrohttpd2

HTTP server C library (MHD 2.x, alpha)

commit 5a22addc8e7edea9be186be829b268ba2ce5d295
parent 76c60a727bdd0519831c9fe832709b64921d4535
Author: Evgeny Grin (Karlson2k) <k2k@drgrin.dev>
Date:   Thu, 24 Apr 2025 18:41:28 +0200

Fixed use of reserved identifiers

Diffstat:
M src/mhd2/mhd_itc.h     |  18 +++++++++---------
M src/mhd2/mhd_mempool.c | 152 ++++++++++++++++++++++++++++++++++++---------------------------------
2 files changed, 86 insertions(+), 84 deletions(-)
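
Background on the fix: C reserves for the implementation all identifiers that begin with an underscore followed by an uppercase letter (such as _MHD_RED_ZONE_SIZE) and all identifiers that begin with two underscores (such as the macro-local __b), per C11 7.1.3. The commit moves such names into the library's own mhd_ prefix. A minimal sketch of the rule, with hypothetical names that are not part of MHD:

/* Reserved for the implementation (C11 7.1.3); may silently
 * collide with compiler or libc internals: */
#define _MY_MAX (64)   /* begins with '_' + uppercase letter */
static int __tmp;      /* begins with two underscores */

/* Fine in C: both start with the project's own prefix.
 * 'mhd__tmp' merely CONTAINS a double underscore; in C only
 * names that BEGIN with one are reserved for any use. */
#define mhd_MAX (64)
static int mhd__tmp;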

diff --git a/src/mhd2/mhd_itc.h b/src/mhd2/mhd_itc.h
@@ -112,9 +112,9 @@ static const uint_fast64_t mhd_ITC_WR_DATA = 1;
  * Clear signalled state on @a itc
  * @param itc the itc to clear
  */
-#define mhd_itc_clear(itc) \
-  do { uint_fast64_t __b; \
-       (void) read ((itc).fd, (void*) &__b, 8); \
+#define mhd_itc_clear(itc) \
+  do { uint_fast64_t mhd__b; \
+       (void) read ((itc).fd, (void*) &mhd__b, 8); \
   } while (0)
 
 /**
@@ -215,9 +215,9 @@ mhd_itc_nonblocking (struct mhd_itc *pitc);
  * Clear signaled state on @a itc
  * @param itc the itc to clear
  */
-# define mhd_itc_clear(itc) do \
-  { long __b; \
-    while (0 < read ((itc).fd[0], (void*) &__b, sizeof(__b))) \
+# define mhd_itc_clear(itc) do \
+  { long mhd__b; \
+    while (0 < read ((itc).fd[0], (void*) &mhd__b, sizeof(mhd__b))) \
     {(void) 0;} } while (0)
 
 /**
@@ -304,9 +304,9 @@ mhd_itc_nonblocking (struct mhd_itc *pitc);
  * Clear signaled state on @a itc
  * @param itc the itc to clear
  */
-# define mhd_itc_clear(itc) do \
-  { long __b; \
-    while (0 < recv ((itc).sk[0], (void*) &__b, sizeof(__b), 0)) \
+# define mhd_itc_clear(itc) do \
+  { long mhd__b; \
+    while (0 < recv ((itc).sk[0], (void*) &mhd__b, sizeof(mhd__b), 0)) \
     {(void) 0;} } while (0)
 
 /**
diff --git a/src/mhd2/mhd_mempool.c b/src/mhd2/mhd_mempool.c
@@ -64,17 +64,19 @@
 #include "mhd_limits.h"
 
 
+#ifndef mhd_FALLBACK_PAGE_SIZE
 /**
  * Fallback value of page size
  */
-#define _MHD_FALLBACK_PAGE_SIZE (4096)
+# define mhd_FALLBACK_PAGE_SIZE (4096)
+#endif
 
 #if defined(MHD_USE_PAGESIZE_MACRO)
-#define MHD_DEF_PAGE_SIZE_ PAGESIZE
+# define mhd_DEF_PAGE_SIZE PAGESIZE
 #elif defined(MHD_USE_PAGE_SIZE_MACRO)
-#define MHD_DEF_PAGE_SIZE_ PAGE_SIZE
+# define mhd_DEF_PAGE_SIZE PAGE_SIZE
 #else /* ! PAGESIZE */
-#define MHD_DEF_PAGE_SIZE_ _MHD_FALLBACK_PAGE_SIZE
+# define mhd_DEF_PAGE_SIZE mhd_FALLBACK_PAGE_SIZE
 #endif /* ! PAGESIZE */
 
 
@@ -100,52 +102,52 @@
 /**
  * Round up 'n' to a multiple of ALIGN_SIZE.
  */
-#define ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) \
-                           / (ALIGN_SIZE) *(ALIGN_SIZE))
+#define mhd_ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) \
+                               / (ALIGN_SIZE) *(ALIGN_SIZE))
 
 
 #ifndef MHD_ASAN_POISON_ACTIVE
-#define _MHD_NOSANITIZE_PTRS /**/
-#define _MHD_RED_ZONE_SIZE (0)
-#define ROUND_TO_ALIGN_PLUS_RED_ZONE(n) ROUND_TO_ALIGN (n)
-#define _MHD_POISON_MEMORY(pointer, size) (void) 0
-#define _MHD_UNPOISON_MEMORY(pointer, size) (void) 0
+# define mhd_NOSANITIZE_PTRS /**/
+# define mhd_RED_ZONE_SIZE (0)
+# define mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE(n) mhd_ROUND_TO_ALIGN (n)
+# define mhd_POISON_MEMORY(pointer, size) (void) 0
+# define mhd_UNPOISON_MEMORY(pointer, size) (void) 0
 /**
  * Boolean 'true' if the first pointer is less or equal the second pointer
  */
-#define mp_ptr_le_(p1,p2) \
+# define mp_ptr_le_(p1,p2) \
   (((const uint8_t*) (p1)) <= ((const uint8_t*) (p2)))
 /**
  * The difference in bytes between positions of the first and
  * the second pointers
  */
-#define mp_ptr_diff_(p1,p2) \
+# define mp_ptr_diff_(p1,p2) \
   ((size_t) (((const uint8_t*) (p1)) - ((const uint8_t*) (p2))))
 #else /* MHD_ASAN_POISON_ACTIVE */
-#define _MHD_RED_ZONE_SIZE (ALIGN_SIZE)
-#define ROUND_TO_ALIGN_PLUS_RED_ZONE(n) \
-  (ROUND_TO_ALIGN (n) + _MHD_RED_ZONE_SIZE)
-#define _MHD_POISON_MEMORY(pointer, size) \
+# define mhd_RED_ZONE_SIZE (ALIGN_SIZE)
+# define mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE(n) \
+  (mhd_ROUND_TO_ALIGN (n) + mhd_RED_ZONE_SIZE)
+# define mhd_POISON_MEMORY(pointer, size) \
   ASAN_POISON_MEMORY_REGION ((pointer), (size))
-#define _MHD_UNPOISON_MEMORY(pointer, size) \
+# define mhd_UNPOISON_MEMORY(pointer, size) \
   ASAN_UNPOISON_MEMORY_REGION ((pointer), (size))
-#if defined(FUNC_PTRCOMPARE_CAST_WORKAROUND_WORKS)
+# if defined(FUNC_PTRCOMPARE_CAST_WORKAROUND_WORKS)
 /**
  * Boolean 'true' if the first pointer is less or equal the second pointer
  */
-#define mp_ptr_le_(p1,p2) \
+# define mp_ptr_le_(p1,p2) \
   (((uintptr_t) ((const void*) (p1))) <= \
    ((uintptr_t) ((const void*) (p2))))
 /**
  * The difference in bytes between positions of the first and
  * the second pointers
  */
-#define mp_ptr_diff_(p1,p2) \
+# define mp_ptr_diff_(p1,p2) \
   ((size_t) (((uintptr_t) ((const uint8_t*) (p1))) - \
              ((uintptr_t) ((const uint8_t*) (p2)))))
 #elif defined(FUNC_ATTR_PTRCOMPARE_WORKS) && \
   defined(FUNC_ATTR_PTRSUBTRACT_WORKS)
-#ifdef _DEBUG
+# ifdef _DEBUG
 /**
  * Boolean 'true' if the first pointer is less or equal the second pointer
  */
@@ -156,7 +158,7 @@ mp_ptr_le_ (const void *p1, const void *p2)
 }
 
 
-#endif /* _DEBUG */
+# endif /* _DEBUG */
 
 
 /**
@@ -170,8 +172,8 @@ mp_ptr_diff_ (const void *p1, const void *p2)
 }
 
 
-#elif defined(FUNC_ATTR_NOSANITIZE_WORKS)
-#ifdef _DEBUG
+# elif defined(FUNC_ATTR_NOSANITIZE_WORKS)
+# ifdef _DEBUG
 /**
  * Boolean 'true' if the first pointer is less or equal the second pointer
  */
@@ -182,7 +184,7 @@ mp_ptr_le_ (const void *p1, const void *p2)
 }
 
 
-#endif /* _DEBUG */
+# endif /* _DEBUG */
 
 /**
  * The difference in bytes between positions of the first and
@@ -195,9 +197,9 @@ mp_ptr_diff_ (const void *p1, const void *p2)
 }
 
 
-#else /* ! FUNC_ATTR_NOSANITIZE_WORKS */
+# else /* ! FUNC_ATTR_NOSANITIZE_WORKS */
 #error User-poisoning cannot be used
-#endif /* ! FUNC_ATTR_NOSANITIZE_WORKS */
+# endif /* ! FUNC_ATTR_NOSANITIZE_WORKS */
 #endif /* MHD_ASAN_POISON_ACTIVE */
 
 /**
@@ -209,7 +211,7 @@ static size_t MHD_sys_page_size_ = (size_t)
 #elif defined(MHD_USE_PAGE_SIZE_MACRO_STATIC)
   PAGE_SIZE;
 #else /* ! MHD_USE_PAGE_SIZE_MACRO_STATIC */
-  _MHD_FALLBACK_PAGE_SIZE; /* Default fallback value */
+  mhd_FALLBACK_PAGE_SIZE; /* Default fallback value */
 #endif /* ! MHD_USE_PAGE_SIZE_MACRO_STATIC */
 
 /**
@@ -224,13 +226,13 @@ mhd_init_mem_pools (void)
   if (-1 != result)
     MHD_sys_page_size_ = (size_t) result;
   else
-    MHD_sys_page_size_ = (size_t) MHD_DEF_PAGE_SIZE_;
+    MHD_sys_page_size_ = (size_t) mhd_DEF_PAGE_SIZE;
 #elif defined(_WIN32)
   SYSTEM_INFO si;
   GetSystemInfo (&si);
   MHD_sys_page_size_ = (size_t) si.dwPageSize;
 #else
-  MHD_sys_page_size_ = (size_t) MHD_DEF_PAGE_SIZE_;
+  MHD_sys_page_size_ = (size_t) mhd_DEF_PAGE_SIZE;
 #endif /* _WIN32 */
   mhd_assert (0 == (MHD_sys_page_size_ % ALIGN_SIZE));
 }
@@ -319,7 +321,7 @@ mdh_pool_create (size_t max)
 #endif /* ! _WIN32 && ! MAP_ANONYMOUS */
   if (MAP_FAILED == pool->memory)
   {
-    alloc_size = ROUND_TO_ALIGN (max);
+    alloc_size = mhd_ROUND_TO_ALIGN (max);
     pool->memory = malloc (alloc_size);
     if (NULL == pool->memory)
     {
@@ -339,7 +341,7 @@ mdh_pool_create (size_t max)
   pool->end = alloc_size;
   pool->size = alloc_size;
   mhd_assert (0 < alloc_size);
-  _MHD_POISON_MEMORY (pool->memory, pool->size);
+  mhd_POISON_MEMORY (pool->memory, pool->size);
   return pool;
 }
 
@@ -357,8 +359,8 @@ mhd_pool_destroy (struct mhd_MemoryPool *restrict pool)
 
     mhd_assert (pool->end >= pool->pos);
     mhd_assert (pool->size >= pool->end - pool->pos);
-    mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));
-    _MHD_UNPOISON_MEMORY (pool->memory, pool->size);
+    mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
+    mhd_UNPOISON_MEMORY (pool->memory, pool->size);
     if (! pool->is_mmap)
       free (pool->memory);
     else
@@ -387,12 +389,12 @@ mhd_pool_get_free (struct mhd_MemoryPool *restrict pool)
 {
   mhd_assert (pool->end >= pool->pos);
   mhd_assert (pool->size >= pool->end - pool->pos);
-  mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));
+  mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
 #ifdef MHD_ASAN_POISON_ACTIVE
-  if ((pool->end - pool->pos) <= _MHD_RED_ZONE_SIZE)
+  if ((pool->end - pool->pos) <= mhd_RED_ZONE_SIZE)
     return 0;
 #endif /* MHD_ASAN_POISON_ACTIVE */
-  return (pool->end - pool->pos) - _MHD_RED_ZONE_SIZE;
+  return (pool->end - pool->pos) - mhd_RED_ZONE_SIZE;
 }
 
@@ -417,8 +419,8 @@ mhd_pool_allocate (struct mhd_MemoryPool *restrict pool,
 
   mhd_assert (pool->end >= pool->pos);
   mhd_assert (pool->size >= pool->end - pool->pos);
-  mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));
-  asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
+  mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
+  asize = mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
   if ( (0 == asize) && (0 != size) )
     return NULL; /* size too close to SIZE_MAX */
   if (asize > pool->end - pool->pos)
@@ -433,7 +435,7 @@ mhd_pool_allocate (struct mhd_MemoryPool *restrict pool,
     ret = &pool->memory[pool->pos];
     pool->pos += asize;
   }
-  _MHD_UNPOISON_MEMORY (ret, size);
+  mhd_UNPOISON_MEMORY (ret, size);
   return ret;
 }
 
@@ -464,7 +466,7 @@ mhd_pool_is_resizable_inplace (struct mhd_MemoryPool *restrict pool,
     mhd_assert (pool->size >= block_offset);
     mhd_assert (pool->size >= block_offset + block_size);
     return (pool->pos ==
-            ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size));
+            mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size));
   }
   return false; /* Unallocated blocks cannot be resized in-place */
 }
@@ -499,8 +501,8 @@ mhd_pool_try_alloc (struct mhd_MemoryPool *restrict pool,
 
   mhd_assert (pool->end >= pool->pos);
   mhd_assert (pool->size >= pool->end - pool->pos);
-  mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));
-  asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
+  mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
+  asize = mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
   if ( (0 == asize) && (0 != size) )
   { /* size is too close to SIZE_MAX, very unlikely */
     *required_bytes = SIZE_MAX;
@@ -509,7 +511,7 @@ mhd_pool_try_alloc (struct mhd_MemoryPool *restrict pool,
   if (asize > pool->end - pool->pos)
   {
     mhd_assert ((pool->end - pool->pos) == \
-                ROUND_TO_ALIGN (pool->end - pool->pos));
+                mhd_ROUND_TO_ALIGN (pool->end - pool->pos));
    if (asize <= pool->end)
      *required_bytes = asize - (pool->end - pool->pos);
    else
@@ -519,7 +521,7 @@ mhd_pool_try_alloc (struct mhd_MemoryPool *restrict pool,
   *required_bytes = 0;
   ret = &pool->memory[pool->end - asize];
   pool->end -= asize;
-  _MHD_UNPOISON_MEMORY (ret, size);
+  mhd_UNPOISON_MEMORY (ret, size);
   return ret;
 }
 
@@ -554,7 +556,7 @@ mhd_pool_reallocate (struct mhd_MemoryPool *pool,
   mhd_assert (pool->size >= pool->end - pool->pos);
   mhd_assert (old != NULL || old_size == 0);
   mhd_assert (pool->size >= old_size);
-  mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));
+  mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
 #if defined(MHD_ASAN_POISON_ACTIVE) && defined(HAVE___ASAN_REGION_IS_POISONED)
   mhd_assert (NULL == __asan_region_is_poisoned (old, old_size));
 #endif /* MHD_ASAN_POISON_ACTIVE && HAVE___ASAN_REGION_IS_POISONED */
@@ -566,24 +568,24 @@ mhd_pool_reallocate (struct mhd_MemoryPool *pool,
     mhd_assert (mp_ptr_le_ (pool->memory, old));
     /* (pool->memory + pool->size >= (uint8_t*) old + old_size) */
-    mhd_assert ((pool->size - _MHD_RED_ZONE_SIZE) >= (old_offset + old_size));
+    mhd_assert ((pool->size - mhd_RED_ZONE_SIZE) >= (old_offset + old_size));
     /* Blocks "from the end" must not be reallocated */
     /* (old_size == 0 || pool->memory + pool->pos > (uint8_t*) old) */
     mhd_assert ((old_size == 0) || \
                 (pool->pos > old_offset));
     mhd_assert ((old_size == 0) || \
-                ((pool->end - _MHD_RED_ZONE_SIZE) >= (old_offset + old_size)));
+                ((pool->end - mhd_RED_ZONE_SIZE) >= (old_offset + old_size)));
 
     /* Try resizing in-place */
     if (shrinking)
     { /* Shrinking in-place, zero-out freed part */
       memset ((uint8_t *) old + new_size, 0, old_size - new_size);
-      _MHD_POISON_MEMORY ((uint8_t *) old + new_size, old_size - new_size);
+      mhd_POISON_MEMORY ((uint8_t *) old + new_size, old_size - new_size);
     }
     if (pool->pos ==
-        ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + old_size))
+        mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + old_size))
     { /* "old" block is the last allocated block */
       const size_t new_apos =
-        ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + new_size);
+        mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + new_size);
       if (! shrinking)
      { /* Grow in-place, check for enough space. */
        if ( (new_apos > pool->end) ||
@@ -592,14 +594,14 @@ mhd_pool_reallocate (struct mhd_MemoryPool *pool,
       }
       /* Resized in-place */
       pool->pos = new_apos;
-      _MHD_UNPOISON_MEMORY (old, new_size);
+      mhd_UNPOISON_MEMORY (old, new_size);
       return old;
     }
     if (shrinking)
       return old; /* Resized in-place, freed part remains allocated */
   }
 
   /* Need to allocate new block */
-  asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
+  asize = mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
   if ( ( (0 == asize) && (0 != new_size) ) || /* Value wrap, too large new_size. */
        (asize > pool->end - pool->pos) ) /* Not enough space */
@@ -608,14 +610,14 @@ mhd_pool_reallocate (struct mhd_MemoryPool *pool,
 
   new_blc = pool->memory + pool->pos;
   pool->pos += asize;
-  _MHD_UNPOISON_MEMORY (new_blc, new_size);
+  mhd_UNPOISON_MEMORY (new_blc, new_size);
   if (0 != old_size)
   {
     /* Move data to new block, old block remains allocated */
     memcpy (new_blc, old, old_size);
     /* Zero-out old block */
     memset (old, 0, old_size);
-    _MHD_POISON_MEMORY (old, old_size);
+    mhd_POISON_MEMORY (old, old_size);
   }
   return new_blc;
 }
@@ -642,7 +644,7 @@ mhd_pool_deallocate (struct mhd_MemoryPool *restrict pool,
 
   mhd_assert (pool->size >= pool->end - pool->pos);
   mhd_assert (block != NULL || block_size == 0);
   mhd_assert (pool->size >= block_size);
-  mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));
+  mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
   if (NULL != block)
   { /* Have previously allocated data */
@@ -654,7 +656,7 @@ mhd_pool_deallocate (struct mhd_MemoryPool *restrict pool,
     if (0 != block_size)
     {
       memset (block, 0, block_size);
-      _MHD_POISON_MEMORY (block, block_size);
+      mhd_POISON_MEMORY (block, block_size);
     }
 #if ! defined(MHD_FAVOR_SMALL_CODE) && ! defined(MHD_ASAN_POISON_ACTIVE)
     else
@@ -664,29 +666,29 @@ mhd_pool_deallocate (struct mhd_MemoryPool *restrict pool,
     { /* "Normal" block, not allocated "from the end". */
       const size_t alg_end =
-        ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size);
+        mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size);
       mhd_assert (alg_end <= pool->pos);
       if (alg_end == pool->pos)
       {
         /* The last allocated block, return deallocated block to the pool */
-        size_t alg_start = ROUND_TO_ALIGN (block_offset);
+        size_t alg_start = mhd_ROUND_TO_ALIGN (block_offset);
         mhd_assert (alg_start >= block_offset);
 #if defined(MHD_ASAN_POISON_ACTIVE)
         if (alg_start != block_offset)
         {
-          _MHD_POISON_MEMORY (pool->memory + block_offset, \
-                              alg_start - block_offset);
+          mhd_POISON_MEMORY (pool->memory + block_offset, \
+                             alg_start - block_offset);
         }
        else if (0 != alg_start)
        {
          bool need_red_zone_before;
-          mhd_assert (_MHD_RED_ZONE_SIZE <= alg_start);
+          mhd_assert (mhd_RED_ZONE_SIZE <= alg_start);
 #if defined(HAVE___ASAN_REGION_IS_POISONED)
          need_red_zone_before =
            (NULL == __asan_region_is_poisoned (pool->memory + alg_start
-                                                - _MHD_RED_ZONE_SIZE,
-                                                _MHD_RED_ZONE_SIZE));
+                                                - mhd_RED_ZONE_SIZE,
+                                                mhd_RED_ZONE_SIZE));
 #elif defined(HAVE___ASAN_ADDRESS_IS_POISONED)
          need_red_zone_before =
            (0 == __asan_address_is_poisoned (pool->memory + alg_start - 1));
@@ -695,13 +697,13 @@ mhd_pool_deallocate (struct mhd_MemoryPool *restrict pool,
 #endif /* ! HAVE___ASAN_ADDRESS_IS_POISONED */
           if (need_red_zone_before)
           {
-            _MHD_POISON_MEMORY (pool->memory + alg_start, _MHD_RED_ZONE_SIZE);
-            alg_start += _MHD_RED_ZONE_SIZE;
+            mhd_POISON_MEMORY (pool->memory + alg_start, mhd_RED_ZONE_SIZE);
+            alg_start += mhd_RED_ZONE_SIZE;
           }
         }
 #endif /* MHD_ASAN_POISON_ACTIVE */
         mhd_assert (alg_start <= pool->pos);
-        mhd_assert (alg_start == ROUND_TO_ALIGN (alg_start));
+        mhd_assert (alg_start == mhd_ROUND_TO_ALIGN (alg_start));
         pool->pos = alg_start;
       }
     }
@@ -711,12 +713,12 @@ mhd_pool_deallocate (struct mhd_MemoryPool *restrict pool,
       /* The size and the pointers of such block should not be
          manipulated by MHD code (block split is disallowed). */
       mhd_assert (block_offset >= pool->end);
-      mhd_assert (ROUND_TO_ALIGN (block_offset) == block_offset);
+      mhd_assert (mhd_ROUND_TO_ALIGN (block_offset) == block_offset);
       if (block_offset == pool->end)
       {
         /* The last allocated block, return deallocated block to the pool */
         const size_t alg_end =
-          ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size);
+          mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size);
         pool->end = alg_end;
       }
     }
@@ -755,7 +757,7 @@ mhd_pool_reset (struct mhd_MemoryPool *restrict pool,
 #if defined(MHD_ASAN_POISON_ACTIVE) && defined(HAVE___ASAN_REGION_IS_POISONED)
   mhd_assert (NULL == __asan_region_is_poisoned (keep, copy_bytes));
 #endif /* MHD_ASAN_POISON_ACTIVE && HAVE___ASAN_REGION_IS_POISONED */
-  _MHD_UNPOISON_MEMORY (pool->memory, new_size);
+  mhd_UNPOISON_MEMORY (pool->memory, new_size);
   if ( (NULL != keep) &&
        (keep != pool->memory) )
   {
@@ -770,7 +772,7 @@ mhd_pool_reset (struct mhd_MemoryPool *restrict pool,
     size_t to_zero; /** Size of area to zero-out */
 
     to_zero = pool->size - copy_bytes;
-    _MHD_UNPOISON_MEMORY (pool->memory + copy_bytes, to_zero);
+    mhd_UNPOISON_MEMORY (pool->memory + copy_bytes, to_zero);
 #ifdef _WIN32
     if (pool->is_mmap)
     {
@@ -800,10 +802,10 @@ mhd_pool_reset (struct mhd_MemoryPool *restrict pool,
               0,
               to_zero);
   }
-  pool->pos = ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
+  pool->pos = mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
   pool->end = pool->size;
-  _MHD_POISON_MEMORY (((uint8_t *) pool->memory) + new_size, \
-                      pool->size - new_size);
+  mhd_POISON_MEMORY (((uint8_t *) pool->memory) + new_size, \
+                     pool->size - new_size);
   return pool->memory;
 }
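
One detail in the mhd_mempool.c hunks goes beyond renaming: the fallback page size definition is now wrapped in an include guard, so it only applies when the name is not already defined. A minimal sketch of that pattern follows; the -D override shown is a hypothetical use, not something this commit itself configures:

/* Define a default that a builder may override at compile time,
 * e.g.:  cc -Dmhd_FALLBACK_PAGE_SIZE=8192 -c mhd_mempool.c  */
#ifndef mhd_FALLBACK_PAGE_SIZE
# define mhd_FALLBACK_PAGE_SIZE (4096)
#endif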