libmicrohttpd2

HTTP server C library (MHD 2.x, alpha)

mempool_funcs.c (24485B)


/* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */
/*
  This file is part of GNU libmicrohttpd.
  Copyright (C) 2007--2024 Daniel Pittman and Christian Grothoff
  Copyright (C) 2014--2024 Evgeny Grin (Karlson2k)

  GNU libmicrohttpd is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  GNU libmicrohttpd is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  Alternatively, you can redistribute GNU libmicrohttpd and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of
  the License, or (at your option) any later version, together
  with the eCos exception, as follows:

    As a special exception, if other files instantiate templates or
    use macros or inline functions from this file, or you compile this
    file and link it with other works to produce a work based on this
    file, this file does not by itself cause the resulting work to be
    covered by the GNU General Public License. However the source code
    for this file must still be made available in accordance with
    section (3) of the GNU General Public License v2.

    This exception does not invalidate any other reasons why a work
    based on this file might be covered by the GNU General Public
    License.

  You should have received copies of the GNU Lesser General Public
  License and the GNU General Public License along with this library;
  if not, see <https://www.gnu.org/licenses/>.
*/

/**
 * @file src/mhd2/mempool_funcs.c
 * @brief memory pool
 * @author Christian Grothoff
 * @author Karlson2k (Evgeny Grin)
 * TODO:
 * + Update code style
 * + Detect mmap() in configure (it is purely optional!)
 */
#include "mhd_sys_options.h"
#include "mempool_funcs.h"
#include "compat_calloc.h"

#ifdef HAVE_STDLIB_H
#  include <stdlib.h>
#endif /* HAVE_STDLIB_H */
#include <string.h>
#include "mhd_assert.h"
#ifdef HAVE_SYS_MMAN_H
#  include <sys/mman.h>
#endif
#ifdef _WIN32
#  include <windows.h>
#endif
#ifdef HAVE_SYSCONF
#  include <unistd.h>
#  if defined(_SC_PAGE_SIZE)
#    define MHD_SC_PAGESIZE _SC_PAGE_SIZE
#  elif defined(_SC_PAGESIZE)
#    define MHD_SC_PAGESIZE _SC_PAGESIZE
#  endif /* _SC_PAGESIZE */
#endif /* HAVE_SYSCONF */

#if defined(MHD_USE_PAGESIZE_MACRO) || defined(MHD_USE_PAGE_SIZE_MACRO)
#  ifndef HAVE_SYSCONF /* Avoid duplicate include */
#    include <unistd.h>
#  endif /* HAVE_SYSCONF */
#  ifdef HAVE_LIMITS_H
#    include <limits.h>
#  endif
#  ifdef HAVE_SYS_PARAM_H
#    include <sys/param.h>
#  endif /* HAVE_SYS_PARAM_H */
#endif /* MHD_USE_PAGESIZE_MACRO || MHD_USE_PAGE_SIZE_MACRO */

#include "mhd_limits.h"

#ifndef mhd_FALLBACK_PAGE_SIZE
/**
 * Fallback value of page size
 */
#  define mhd_FALLBACK_PAGE_SIZE (4096)
#endif

#if defined(MHD_USE_PAGESIZE_MACRO)
#  define mhd_DEF_PAGE_SIZE PAGESIZE
#elif defined(MHD_USE_PAGE_SIZE_MACRO)
#  define mhd_DEF_PAGE_SIZE PAGE_SIZE
#else  /* ! MHD_USE_PAGE_SIZE_MACRO */
#  define mhd_DEF_PAGE_SIZE mhd_FALLBACK_PAGE_SIZE
#endif /* ! MHD_USE_PAGE_SIZE_MACRO */


#ifdef MHD_ASAN_POISON_ACTIVE
#  include <sanitizer/asan_interface.h>
#endif /* MHD_ASAN_POISON_ACTIVE */

#if defined(MAP_ANONYMOUS)
#  define mhd_MAP_ANONYMOUS MAP_ANONYMOUS
#endif

#if ! defined(mhd_MAP_ANONYMOUS) && defined(MAP_ANON)
#  define mhd_MAP_ANONYMOUS MAP_ANON
#endif

#if defined(mhd_MAP_ANONYMOUS) || defined(_WIN32)
#  define mhd_USE_LARGE_ALLOCS 1
#endif

#ifdef mhd_USE_LARGE_ALLOCS
#  if defined(_WIN32)
#    define mhd_MAP_FAILED NULL
#  elif defined(MAP_FAILED)
#    define mhd_MAP_FAILED MAP_FAILED
#  else
#    define mhd_MAP_FAILED ((void*) -1)
#  endif
#endif

/**
 * Round up 'n' to a multiple of mhd_MEMPOOL_ALIGN_SIZE.
 */
#define mhd_ROUND_TO_ALIGN(n) \
        (((n) + (mhd_MEMPOOL_ALIGN_SIZE - 1)) \
         / (mhd_MEMPOOL_ALIGN_SIZE) * (mhd_MEMPOOL_ALIGN_SIZE))
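
/* Worked example, assuming mhd_MEMPOOL_ALIGN_SIZE is 8 (the actual value
 * comes from mempool_funcs.h):
 *   mhd_ROUND_TO_ALIGN (13) -> ((13 + 7) / 8) * 8 -> 16
 *   mhd_ROUND_TO_ALIGN (16) -> ((16 + 7) / 8) * 8 -> 16
 * The integer division truncates back to an alignment boundary after the
 * (ALIGN - 1) bump, so already-aligned values are left unchanged. */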


#ifndef MHD_ASAN_POISON_ACTIVE
#  define mhd_NOSANITIZE_PTRS /**/
#  define mhd_RED_ZONE_SIZE (0)
#  define mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE(n) mhd_ROUND_TO_ALIGN (n)
#  define mhd_POISON_MEMORY(pointer, size) (void) 0
#  define mhd_UNPOISON_MEMORY(pointer, size) (void) 0
/**
 * Boolean 'true' if the first pointer is less than or equal to the second
 * pointer
 */
#  define mp_ptr_le_(p1,p2) \
        (((const uint8_t*) (p1)) <= ((const uint8_t*) (p2)))
/**
 * The difference in bytes between positions of the first and
 * the second pointers
 */
#  define mp_ptr_diff_(p1,p2) \
        ((size_t) (((const uint8_t*) (p1)) - ((const uint8_t*) (p2))))
#else  /* MHD_ASAN_POISON_ACTIVE */
#  define mhd_RED_ZONE_SIZE (mhd_MEMPOOL_ALIGN_SIZE)
#  define mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE(n) \
        (mhd_ROUND_TO_ALIGN (n) + mhd_RED_ZONE_SIZE)
#  define mhd_POISON_MEMORY(pointer, size) \
        ASAN_POISON_MEMORY_REGION ((pointer), (size))
#  define mhd_UNPOISON_MEMORY(pointer, size) \
        ASAN_UNPOISON_MEMORY_REGION ((pointer), (size))
#  if defined(FUNC_PTRCOMPARE_CAST_WORKAROUND_WORKS)
/**
 * Boolean 'true' if the first pointer is less than or equal to the second
 * pointer
 */
#    define mp_ptr_le_(p1,p2) \
        (((uintptr_t) ((const void*) (p1))) <= \
         ((uintptr_t) ((const void*) (p2))))
/**
 * The difference in bytes between positions of the first and
 * the second pointers
 */
#    define mp_ptr_diff_(p1,p2) \
        ((size_t) (((uintptr_t) ((const uint8_t*) (p1))) - \
                   ((uintptr_t) ((const uint8_t*) (p2)))))
#  elif defined(FUNC_ATTR_PTRCOMPARE_WORKS) && \
    defined(FUNC_ATTR_PTRSUBTRACT_WORKS)
#    ifndef NDEBUG
/**
 * Boolean 'true' if the first pointer is less than or equal to the second
 * pointer
 */
__attribute__((no_sanitize ("pointer-compare"))) static bool
mp_ptr_le_ (const void *p1, const void *p2)
{
  return (((const uint8_t *) p1) <= ((const uint8_t *) p2));
}


#    endif /* ! NDEBUG */


/**
 * The difference in bytes between positions of the first and
 * the second pointers
 */
__attribute__((no_sanitize ("pointer-subtract"))) static size_t
mp_ptr_diff_ (const void *p1, const void *p2)
{
  return (size_t) (((const uint8_t *) p1) - ((const uint8_t *) p2));
}


#  elif defined(FUNC_ATTR_NOSANITIZE_WORKS)
#    ifndef NDEBUG
/**
 * Boolean 'true' if the first pointer is less than or equal to the second
 * pointer
 */
__attribute__((no_sanitize ("address"))) static bool
mp_ptr_le_ (const void *p1, const void *p2)
{
  return (((const uint8_t *) p1) <= ((const uint8_t *) p2));
}


#    endif /* ! NDEBUG */

/**
 * The difference in bytes between positions of the first and
 * the second pointers
 */
__attribute__((no_sanitize ("address"))) static size_t
mp_ptr_diff_ (const void *p1, const void *p2)
{
  return (size_t) (((const uint8_t *) p1) - ((const uint8_t *) p2));
}


#  else  /* ! FUNC_ATTR_NOSANITIZE_WORKS */
#    error User-poisoning cannot be used
#  endif /* ! FUNC_ATTR_NOSANITIZE_WORKS */
#endif /* MHD_ASAN_POISON_ACTIVE */

#ifdef mhd_USE_LARGE_ALLOCS
/**
 * Size of memory page
 */
static size_t MHD_sys_page_size_ = (size_t)
#  if defined(MHD_USE_PAGESIZE_MACRO_STATIC)
                                   PAGESIZE;
#  elif defined(MHD_USE_PAGE_SIZE_MACRO_STATIC)
                                   PAGE_SIZE;
#  else  /* ! MHD_USE_PAGE_SIZE_MACRO_STATIC */
                                   mhd_FALLBACK_PAGE_SIZE; /* Default fallback value */
#  endif /* ! MHD_USE_PAGE_SIZE_MACRO_STATIC */
#endif /* mhd_USE_LARGE_ALLOCS */

void
mhd_init_mem_pools (void)
{
#ifdef mhd_USE_LARGE_ALLOCS
#ifdef MHD_SC_PAGESIZE
  long result;
  result = sysconf (MHD_SC_PAGESIZE);
  if (-1 != result)
    MHD_sys_page_size_ = (size_t) result;
  else
    MHD_sys_page_size_ = (size_t) mhd_DEF_PAGE_SIZE;
#elif defined(_WIN32)
  SYSTEM_INFO si;
  GetSystemInfo (&si);
  MHD_sys_page_size_ = (size_t) si.dwPageSize;
#else
  MHD_sys_page_size_ = (size_t) mhd_DEF_PAGE_SIZE;
#endif /* ! MHD_SC_PAGESIZE && ! _WIN32 */
  mhd_assert (0 == (MHD_sys_page_size_ % mhd_MEMPOOL_ALIGN_SIZE));
#endif /* mhd_USE_LARGE_ALLOCS */
  (void) 0; /* No-op, keeps the body non-empty when large allocs are disabled */
}


/**
 * Handle for a memory pool.  Pools are not reentrant and must not be
 * used by multiple threads.
 */
struct mhd_MemoryPool
{

  /**
   * Pointer to the pool's memory
   */
  uint8_t *memory;

  /**
   * Size of the pool.
   */
  size_t size;

  /**
   * Offset of the first unallocated byte.
   */
  size_t pos;

  /**
   * Offset of the byte after the last unallocated byte.
   */
  size_t end;

#ifdef mhd_USE_LARGE_ALLOCS
  /**
   * 'false' if pool was malloc'ed, 'true' if mmapped (VirtualAlloc'ed for W32).
   */
  bool is_large_alloc;
#endif

  /**
   * Memory allocation zeroing mode
   */
  enum mhd_MemPoolZeroing zeroing;
};
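

/* Pool layout sketch (inferred from the fields above and the asserts used
 * throughout this file):
 *
 *   offset:  0             pos             end             size
 *            |/////////////|...............|///////////////|
 *            | blocks      |  free space   | blocks        |
 *            | allocated   |               | allocated     |
 *            | "normally"  |               | "from the     |
 *            | (growable)  |               |  end" (fixed) |
 *
 * Invariants: pos <= end <= size, and 'pos' is always a multiple of
 * mhd_MEMPOOL_ALIGN_SIZE. */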


MHD_INTERNAL mhd_FN_RET_UNALIASED
mhd_FN_OBJ_CONSTRUCTOR (mhd_pool_destroy)
struct mhd_MemoryPool *
mhd_pool_create (size_t max,
                 enum mhd_MemPoolZeroing zeroing)
{
  struct mhd_MemoryPool *pool;
  size_t alloc_size;

  mhd_assert (max > 0);
  mhd_assert (mhd_RED_ZONE_SIZE < (max + mhd_RED_ZONE_SIZE));
  max += mhd_RED_ZONE_SIZE;
  alloc_size = 0;
  pool = (struct mhd_MemoryPool *) malloc (sizeof (struct mhd_MemoryPool));
  if (NULL == pool)
    return NULL;
  pool->zeroing = zeroing;
#ifdef mhd_USE_LARGE_ALLOCS
  pool->is_large_alloc = false;
  if ( (max <= 32 * 1024) ||
       (max < MHD_sys_page_size_ * 4 / 3) )
  {
    /* Small pool: page-granular allocation is not worth it */
    pool->memory = (uint8_t *) mhd_MAP_FAILED;
  }
  else
  {
    /* Round up allocation to page granularity. */
    alloc_size = max + MHD_sys_page_size_ - 1;
    alloc_size -= alloc_size % MHD_sys_page_size_;
#  if defined(mhd_MAP_ANONYMOUS)
    pool->memory = (uint8_t *) mmap (NULL,
                                     alloc_size,
                                     PROT_READ | PROT_WRITE,
                                     MAP_PRIVATE | mhd_MAP_ANONYMOUS,
                                     -1,
                                     0);
#  else  /* ! mhd_MAP_ANONYMOUS */
    pool->memory = (uint8_t *) VirtualAlloc (NULL,
                                             alloc_size,
                                             MEM_COMMIT | MEM_RESERVE,
                                             PAGE_READWRITE);
#  endif /* ! mhd_MAP_ANONYMOUS */
  }
  /* Mark the pool as "large" only after the mapping is known to have
     succeeded; otherwise the malloc'ed fallback memory below would be
     wrongly released with munmap() / VirtualFree() in mhd_pool_destroy(). */
  if (mhd_MAP_FAILED != pool->memory)
    pool->is_large_alloc = true;
  else
#endif /* mhd_USE_LARGE_ALLOCS */
  if (! 0) /* Dummy condition, pairs with the 'else' above when compiled in */
  {
    alloc_size = mhd_ROUND_TO_ALIGN (max);
    if (MHD_MEMPOOL_ZEROING_NEVER == zeroing)
      pool->memory = (uint8_t *) malloc (alloc_size);
    else
      pool->memory = (uint8_t *) mhd_calloc (1, alloc_size);
    if (((uint8_t *) NULL) == pool->memory)
    {
      free (pool);
      return NULL;
    }
  }
  mhd_assert (0 == (((uintptr_t) pool->memory) % mhd_MEMPOOL_ALIGN_SIZE));
  pool->pos = 0;
  pool->end = alloc_size;
  pool->size = alloc_size;
  mhd_assert (0 < alloc_size);
  mhd_POISON_MEMORY (pool->memory, pool->size);
  return pool;
}
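

#if 0  /* Not compiled: a minimal usage sketch of the pool lifecycle, using
        * only names declared in mempool_funcs.h and used in this file.
        * Real call sites live elsewhere in MHD. */
static void
example_pool_lifecycle (void)
{
  struct mhd_MemoryPool *pool;
  void *buf;

  mhd_init_mem_pools ();     /* Once per process: detect the page size */
  pool = mhd_pool_create (16 * 1024, MHD_MEMPOOL_ZEROING_NEVER);
  if (NULL == pool)
    return;                  /* Out of memory */

  buf = mhd_pool_allocate (pool, 512, false);  /* "Normal" block */
  if (NULL != buf)
  {
    /* ... use 'buf'; no per-block free () is ever required ... */
    mhd_pool_deallocate (pool, buf, 512);
  }
  mhd_pool_destroy (pool);   /* Releases all remaining blocks at once */
}
#endif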


MHD_INTERNAL void
mhd_pool_destroy (struct mhd_MemoryPool *restrict pool)
{
  if (NULL == pool)
    return;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
  mhd_UNPOISON_MEMORY (pool->memory, pool->size);
#ifdef mhd_USE_LARGE_ALLOCS
  if (pool->is_large_alloc)
  {
#  if defined(mhd_MAP_ANONYMOUS)
    munmap (pool->memory,
            pool->size);
#  else
    VirtualFree (pool->memory,
                 0,
                 MEM_RELEASE);
#  endif
  }
  else
#endif /* mhd_USE_LARGE_ALLOCS */
  if (! 0) /* Dummy condition, pairs with the 'else' above when compiled in */
    free (pool->memory);

  free (pool);
}


MHD_INTERNAL MHD_FN_PAR_NONNULL_ALL_ MHD_FN_PURE_ size_t
mhd_pool_get_size (const struct mhd_MemoryPool *restrict pool)
{
  return (pool->size - mhd_RED_ZONE_SIZE);
}


MHD_INTERNAL size_t
mhd_pool_get_free (struct mhd_MemoryPool *restrict pool)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
#ifdef MHD_ASAN_POISON_ACTIVE
  if ((pool->end - pool->pos) <= mhd_RED_ZONE_SIZE)
    return 0;
#endif /* MHD_ASAN_POISON_ACTIVE */
  return (pool->end - pool->pos) - mhd_RED_ZONE_SIZE;
}


MHD_INTERNAL mhd_FN_RET_UNALIASED
mhd_FN_RET_SIZED (2)
mhd_FN_RET_ALIGNED (mhd_MEMPOOL_ALIGN_SIZE) void *
mhd_pool_allocate (struct mhd_MemoryPool *restrict pool,
                   size_t size,
                   bool from_end)
{
  void *ret;
  size_t asize;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
  asize = mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
  if ( (0 == asize) && (0 != size) )
    return NULL; /* size too close to SIZE_MAX */
  if (asize > pool->end - pool->pos)
    return NULL; /* Not enough free space in the pool */
  if (from_end)
  {
    /* Carve the block from the tail of the free gap */
    ret = &pool->memory[pool->end - asize];
    pool->end -= asize;
  }
  else
  {
    /* Carve the block from the head of the free gap */
    ret = &pool->memory[pool->pos];
    pool->pos += asize;
  }
  mhd_UNPOISON_MEMORY (ret, size);
  return ret;
}
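

/* The 'from_end' flag above selects between two allocation classes:
 * "normal" blocks (from_end == false) are carved from the head of the
 * free gap and may later be grown or shrunk with mhd_pool_reallocate (),
 * while "from the end" blocks (from_end == true) are carved from the
 * tail and must keep their size and position (see the asserts in
 * mhd_pool_reallocate () and mhd_pool_deallocate () below).  E.g.:
 *
 *   void *growable = mhd_pool_allocate (pool, 256, false);
 *   void *fixed    = mhd_pool_allocate (pool, 256, true);
 */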


MHD_INTERNAL bool
mhd_pool_is_resizable_inplace (struct mhd_MemoryPool *restrict pool,
                               void *restrict block,
                               size_t block_size)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (block != NULL || block_size == 0);
  mhd_assert (pool->size >= block_size);
  if (NULL != block)
  {
    const size_t block_offset = mp_ptr_diff_ (block, pool->memory);
    mhd_assert (mp_ptr_le_ (pool->memory, block));
    mhd_assert (pool->size >= block_offset);
    mhd_assert (pool->size >= block_offset + block_size);
    return (pool->pos ==
            mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size));
  }
  return false; /* Unallocated blocks cannot be resized in-place */
}


MHD_INTERNAL mhd_FN_RET_UNALIASED
mhd_FN_RET_SIZED (2)
mhd_FN_RET_ALIGNED (mhd_MEMPOOL_ALIGN_SIZE) void *
mhd_pool_try_alloc (struct mhd_MemoryPool *restrict pool,
                    size_t size,
                    size_t *restrict required_bytes)
{
  void *ret;
  size_t asize;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
  asize = mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
  if ( (0 == asize) && (0 != size) )
  { /* size is too close to SIZE_MAX, very unlikely */
    *required_bytes = SIZE_MAX;
    return NULL;
  }
  if (asize > pool->end - pool->pos)
  {
    mhd_assert ((pool->end - pool->pos) == \
                mhd_ROUND_TO_ALIGN (pool->end - pool->pos));
    if (asize <= pool->end)
      *required_bytes = asize - (pool->end - pool->pos);
    else
      *required_bytes = SIZE_MAX;
    return NULL;
  }
  *required_bytes = 0;
  ret = &pool->memory[pool->end - asize];
  pool->end -= asize;
  mhd_UNPOISON_MEMORY (ret, size);
  return ret;
}
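

/* Like mhd_pool_allocate () with 'from_end' set, but on failure it also
 * reports via '*required_bytes' how many bytes are missing from the
 * currently free area, e.g. to compute a sufficient larger pool size.
 * A caller sketch (with hypothetical names):
 *
 *   size_t missing;
 *   if (NULL == mhd_pool_try_alloc (pool, want, &missing))
 *     larger_size = mhd_pool_get_size (pool) + missing;
 */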


MHD_INTERNAL
mhd_FN_RET_SIZED (4) mhd_FN_RET_ALIGNED (mhd_MEMPOOL_ALIGN_SIZE)
void *
mhd_pool_reallocate (struct mhd_MemoryPool *restrict pool,
                     void *restrict old,
                     size_t old_size,
                     size_t new_size)
{
  size_t asize;
  uint8_t *new_blc;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (old != NULL || old_size == 0);
  mhd_assert (pool->size >= old_size);
  mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
#if defined(MHD_ASAN_POISON_ACTIVE) && defined(HAVE___ASAN_REGION_IS_POISONED)
  mhd_assert (NULL == __asan_region_is_poisoned (old, old_size));
#endif /* MHD_ASAN_POISON_ACTIVE && HAVE___ASAN_REGION_IS_POISONED */

  if (NULL != old)
  { /* Have previously allocated data */
    const size_t old_offset = mp_ptr_diff_ (old, pool->memory);
    const bool shrinking = (old_size > new_size);

    mhd_assert (mp_ptr_le_ (pool->memory, old));
    /* (pool->memory + pool->size >= (uint8_t*) old + old_size) */
    mhd_assert ((pool->size - mhd_RED_ZONE_SIZE) >= (old_offset + old_size));
    /* Blocks allocated "from the end" must not be reallocated */
    /* (old_size == 0 || pool->memory + pool->pos > (uint8_t*) old) */
    mhd_assert ((old_size == 0) || \
                (pool->pos > old_offset));
    mhd_assert ((old_size == 0) || \
                ((pool->end - mhd_RED_ZONE_SIZE) >= (old_offset + old_size)));
    /* Try resizing in-place */
    if (shrinking)
    { /* Shrinking in-place, zero-out the freed part */
      if (MHD_MEMPOOL_ZEROING_ON_RESET < pool->zeroing)
        memset ((uint8_t *) old + new_size, 0, old_size - new_size);
      mhd_POISON_MEMORY ((uint8_t *) old + new_size, old_size - new_size);
    }
    if (pool->pos ==
        mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + old_size))
    { /* The "old" block is the last allocated block */
      const size_t new_apos =
        mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + new_size);
      if (! shrinking)
      { /* Growing in-place, check for enough space */
        if ( (new_apos > pool->end) ||
             (new_apos < pool->pos) ) /* Value wrap */
          return NULL;                /* No space */
      }
      /* Resized in-place */
      pool->pos = new_apos;
      mhd_UNPOISON_MEMORY (old, new_size);
      return old;
    }
    if (shrinking)
      return old; /* Shrunk in-place, the freed part remains allocated */
  }
  /* Need to allocate a new block */
  asize = mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
  if ( ( (0 == asize) &&
         (0 != new_size) ) || /* Value wrap, too large new_size */
       (asize > pool->end - pool->pos) ) /* Not enough space */
    return NULL;

  new_blc = pool->memory + pool->pos;
  pool->pos += asize;

  mhd_UNPOISON_MEMORY (new_blc, new_size);
  if (0 != old_size)
  {
    /* Move data to the new block; the old block remains allocated */
    memcpy (new_blc, old, old_size);
    /* Zero-out the old block */
    if (MHD_MEMPOOL_ZEROING_ON_RESET < pool->zeroing)
      memset (old, 0, old_size);
    mhd_POISON_MEMORY (old, old_size);
  }
  return new_blc;
}
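

/* Resizing semantics implemented above, in short: shrinking always
 * succeeds and never moves the data; growing succeeds in-place only for
 * the most recently allocated "normal" block, otherwise the data is
 * copied into a fresh block (or NULL is returned when the free gap is
 * too small).  A minimal sketch, assuming 'pool' has enough room:
 *
 *   char *b = mhd_pool_allocate (pool, 100, false);
 *   b = mhd_pool_reallocate (pool, b, 100, 200);
 *
 * The second call returns the original address because 'b' is still the
 * last "normal" block; with another allocation in between, the contents
 * would be copied to a new block instead. */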


MHD_INTERNAL void
mhd_pool_deallocate (struct mhd_MemoryPool *restrict pool,
                     void *restrict block,
                     size_t block_size)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (block != NULL || block_size == 0);
  mhd_assert (pool->size >= block_size);
  mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));

  if (NULL != block)
  { /* Have previously allocated data */
    const size_t block_offset = mp_ptr_diff_ (block, pool->memory);
    mhd_assert (mp_ptr_le_ (pool->memory, block));
    mhd_assert (block_offset <= pool->size);
    mhd_assert ((block_offset != pool->pos) || (block_size == 0));
    /* Zero-out the deallocated region */
    if (0 != block_size)
    {
      if (MHD_MEMPOOL_ZEROING_ON_RESET < pool->zeroing)
        memset (block, 0, block_size);
      mhd_POISON_MEMORY (block, block_size);
    }
#if ! defined(MHD_FAVOR_SMALL_CODE) && ! defined(MHD_ASAN_POISON_ACTIVE)
    else
      return; /* Zero size, no need to do anything */
#endif /* ! MHD_FAVOR_SMALL_CODE && ! MHD_ASAN_POISON_ACTIVE */
    if (block_offset <= pool->pos)
    {
      /* "Normal" block, not allocated "from the end". */
      const size_t alg_end =
        mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size);
      mhd_assert (alg_end <= pool->pos);
      if (alg_end == pool->pos)
      {
        /* The last allocated block, return deallocated block to the pool */
        size_t alg_start = mhd_ROUND_TO_ALIGN (block_offset);
        mhd_assert (alg_start >= block_offset);
#if defined(MHD_ASAN_POISON_ACTIVE)
        if (alg_start != block_offset)
        {
          mhd_POISON_MEMORY (pool->memory + block_offset, \
                             alg_start - block_offset);
        }
        else if (0 != alg_start)
        {
          bool need_red_zone_before;
          mhd_assert (mhd_RED_ZONE_SIZE <= alg_start);
#if defined(HAVE___ASAN_REGION_IS_POISONED)
          need_red_zone_before =
            (NULL == __asan_region_is_poisoned (pool->memory
                                                + alg_start
                                                - mhd_RED_ZONE_SIZE,
                                                mhd_RED_ZONE_SIZE));
#elif defined(HAVE___ASAN_ADDRESS_IS_POISONED)
          need_red_zone_before =
            (0 == __asan_address_is_poisoned (pool->memory + alg_start - 1));
#else  /* ! HAVE___ASAN_ADDRESS_IS_POISONED */
          need_red_zone_before = true; /* Unknown, assume a new red zone is needed */
#endif /* ! HAVE___ASAN_ADDRESS_IS_POISONED */
          if (need_red_zone_before)
          {
            mhd_POISON_MEMORY (pool->memory + alg_start, mhd_RED_ZONE_SIZE);
            alg_start += mhd_RED_ZONE_SIZE;
          }
        }
#endif /* MHD_ASAN_POISON_ACTIVE */
        mhd_assert (alg_start <= pool->pos);
        mhd_assert (alg_start == mhd_ROUND_TO_ALIGN (alg_start));
        pool->pos = alg_start;
      }
    }
    else
    {
      /* Block allocated "from the end". */
      /* The size and the pointers of such blocks must not be manipulated by
         MHD code (block splitting is disallowed). */
      mhd_assert (block_offset >= pool->end);
      mhd_assert (mhd_ROUND_TO_ALIGN (block_offset) == block_offset);
      if (block_offset == pool->end)
      {
        /* The last allocated block, return deallocated block to the pool */
        const size_t alg_end =
          mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size);
        pool->end = alg_end;
      }
    }
  }
}
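

/* Deallocation is effectively LIFO: the space is returned to the free
 * gap only when the freed block is the last one allocated on its side
 * (head or tail); otherwise the region is only zeroed (depending on the
 * pool's zeroing mode) and stays reserved until the pool is reset or
 * destroyed.  For example:
 *
 *   void *a = mhd_pool_allocate (pool, 64, false);
 *   void *b = mhd_pool_allocate (pool, 64, false);
 *   mhd_pool_deallocate (pool, a, 64);   <- not reclaimed, 'b' is last
 *   mhd_pool_deallocate (pool, b, 64);   <- reclaimed down to 'b's start
 */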


MHD_INTERNAL
mhd_FN_RET_SIZED (4) mhd_FN_RET_ALIGNED (mhd_MEMPOOL_ALIGN_SIZE)
void *
mhd_pool_reset (struct mhd_MemoryPool *restrict pool,
                void *restrict keep,
                size_t copy_bytes,
                size_t new_size)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (copy_bytes <= new_size);
  mhd_assert (copy_bytes + mhd_RED_ZONE_SIZE <= pool->size);
  mhd_assert (keep != NULL || copy_bytes == 0);
  mhd_assert (keep == NULL || mp_ptr_le_ (pool->memory, keep));
  /* (keep == NULL || pool->memory + pool->size >= (uint8_t*) keep + copy_bytes) */
  mhd_assert ((keep == NULL) || \
              (pool->size >= mp_ptr_diff_ (keep, pool->memory) + copy_bytes));
#if defined(MHD_ASAN_POISON_ACTIVE) && defined(HAVE___ASAN_REGION_IS_POISONED)
  mhd_assert (NULL == __asan_region_is_poisoned (keep, copy_bytes));
#endif /* MHD_ASAN_POISON_ACTIVE && HAVE___ASAN_REGION_IS_POISONED */
  mhd_UNPOISON_MEMORY (pool->memory, new_size);
  if ( (NULL != keep) &&
       (keep != pool->memory) )
  {
    /* Move the retained data to the start of the pool */
    if (0 != copy_bytes)
      memmove (pool->memory,
               keep,
               copy_bytes);
  }
  if ((MHD_MEMPOOL_ZEROING_NEVER != pool->zeroing) &&
      (pool->size > copy_bytes))
  {
    size_t to_zero; /* Size of the area to zero out */

    to_zero = pool->size - copy_bytes;
    mhd_UNPOISON_MEMORY (pool->memory + copy_bytes, to_zero);
#if defined(mhd_USE_LARGE_ALLOCS) && defined(_WIN32)
    if (pool->is_large_alloc)
    {
      size_t to_recommit; /* Size of the de-committed and re-committed area */
      uint8_t *recommit_addr;
      /* Round down to page size */
      to_recommit = to_zero - to_zero % MHD_sys_page_size_;
      recommit_addr = pool->memory + pool->size - to_recommit;

      /* De-committing and re-committing the pages clears the memory and
       * keeps the pages free / available for other needs until accessed. */
      if (VirtualFree (recommit_addr,
                       to_recommit,
                       MEM_DECOMMIT))
      {
        to_zero -= to_recommit;

        if (recommit_addr != VirtualAlloc (recommit_addr,
                                           to_recommit,
                                           MEM_COMMIT,
                                           PAGE_READWRITE))
          abort (); /* Serious error, must never happen */
      }
    }
#endif /* mhd_USE_LARGE_ALLOCS && _WIN32 */
    memset (&pool->memory[copy_bytes],
            0,
            to_zero);
  }
  pool->pos = mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
  pool->end = pool->size;
  mhd_POISON_MEMORY (((uint8_t *) pool->memory) + new_size, \
                     pool->size - new_size);
  return pool->memory;
}
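

/* A typical reset pattern (a sketch with hypothetical variable names; in
 * MHD such pools back a connection and are recycled between requests):
 * keep the still-unprocessed part of a buffer, wipe the rest, and restart
 * with a single "normal" allocation of 'new_size' bytes at the very
 * beginning of the pool:
 *
 *   read_buf = mhd_pool_reset (pool, read_buf, unprocessed_len,
 *                              new_read_buf_size);
 *
 * After the call everything beyond the new block is free space again. */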


/* end of mempool_funcs.c */