libmicrohttpd (HTTP/1.x server C library, MHD 1.x, stable)

memorypool.c

/*
     This file is part of libmicrohttpd
     Copyright (C) 2007--2024 Daniel Pittman and Christian Grothoff
     Copyright (C) 2014--2024 Evgeny Grin (Karlson2k)

     This library is free software; you can redistribute it and/or
     modify it under the terms of the GNU Lesser General Public
     License as published by the Free Software Foundation; either
     version 2.1 of the License, or (at your option) any later version.

     This library is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     Lesser General Public License for more details.

     You should have received a copy of the GNU Lesser General Public
     License along with this library; if not, write to the Free Software
     Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
*/

/**
 * @file memorypool.c
 * @brief memory pool
 * @author Christian Grothoff
 * @author Karlson2k (Evgeny Grin)
 */
#include "memorypool.h"
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif /* HAVE_STDLIB_H */
#include <string.h>
#include <stdint.h>
#include "mhd_assert.h"
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef _WIN32
#include <windows.h>
#endif
#ifdef HAVE_SYSCONF
#include <unistd.h>
#if defined(_SC_PAGE_SIZE)
#define MHD_SC_PAGESIZE _SC_PAGE_SIZE
#elif defined(_SC_PAGESIZE)
#define MHD_SC_PAGESIZE _SC_PAGESIZE
#endif /* _SC_PAGESIZE */
#endif /* HAVE_SYSCONF */
#include "mhd_limits.h" /* for SIZE_MAX, PAGESIZE / PAGE_SIZE */

#if defined(MHD_USE_PAGESIZE_MACRO) || defined(MHD_USE_PAGE_SIZE_MACRO)
#ifndef HAVE_SYSCONF /* Avoid duplicate include */
#include <unistd.h>
#endif /* HAVE_SYSCONF */
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif /* HAVE_SYS_PARAM_H */
#endif /* MHD_USE_PAGESIZE_MACRO || MHD_USE_PAGE_SIZE_MACRO */

/**
 * Fallback value of page size
 */
#define _MHD_FALLBACK_PAGE_SIZE (4096)

#if defined(MHD_USE_PAGESIZE_MACRO)
#define MHD_DEF_PAGE_SIZE_ PAGESIZE
#elif defined(MHD_USE_PAGE_SIZE_MACRO)
#define MHD_DEF_PAGE_SIZE_ PAGE_SIZE
#else  /* ! PAGESIZE */
#define MHD_DEF_PAGE_SIZE_ _MHD_FALLBACK_PAGE_SIZE
#endif /* ! PAGESIZE */


#ifdef MHD_ASAN_POISON_ACTIVE
#include <sanitizer/asan_interface.h>
#endif /* MHD_ASAN_POISON_ACTIVE */

/* define MAP_ANONYMOUS for Mac OS X */
#if defined(MAP_ANON) && ! defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif
#if defined(_WIN32)
#define MAP_FAILED NULL
#elif ! defined(MAP_FAILED)
#define MAP_FAILED ((void*) -1)
#endif

/**
 * Align to 2x word size (as GNU libc does).
 */
#define ALIGN_SIZE (2 * sizeof(void*))

/**
 * Round up 'n' to a multiple of ALIGN_SIZE.
 */
#define ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) \
                           / (ALIGN_SIZE) *(ALIGN_SIZE))

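/*
 * Worked example (added for illustration, not part of the original
 * source): on a typical 64-bit platform sizeof(void*) is 8, so
 * ALIGN_SIZE is 16 and ROUND_TO_ALIGN computes ((n + 15) / 16) * 16
 * with integer division:
 *
 *   ROUND_TO_ALIGN(1)  -> 16
 *   ROUND_TO_ALIGN(16) -> 16
 *   ROUND_TO_ALIGN(17) -> 32
 *
 * Values close to SIZE_MAX wrap to 0; the callers below treat a zero
 * result for a non-zero 'n' as an overflow error.
 */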

#ifndef MHD_ASAN_POISON_ACTIVE
#define _MHD_NOSANITIZE_PTRS /**/
#define _MHD_RED_ZONE_SIZE (0)
#define ROUND_TO_ALIGN_PLUS_RED_ZONE(n) ROUND_TO_ALIGN(n)
#define _MHD_POISON_MEMORY(pointer, size) (void)0
#define _MHD_UNPOISON_MEMORY(pointer, size) (void)0
/**
 * Boolean 'true' if the first pointer is less than or equal to
 * the second pointer
 */
#define mp_ptr_le_(p1,p2) \
  (((const uint8_t*)(p1)) <= ((const uint8_t*)(p2)))
/**
 * The difference in bytes between positions of the first and
 * the second pointers
 */
#define mp_ptr_diff_(p1,p2) \
  ((size_t)(((const uint8_t*)(p1)) - ((const uint8_t*)(p2))))
#else  /* MHD_ASAN_POISON_ACTIVE */
#define _MHD_RED_ZONE_SIZE (ALIGN_SIZE)
#define ROUND_TO_ALIGN_PLUS_RED_ZONE(n) (ROUND_TO_ALIGN(n) + _MHD_RED_ZONE_SIZE)
#define _MHD_POISON_MEMORY(pointer, size) \
  ASAN_POISON_MEMORY_REGION ((pointer), (size))
#define _MHD_UNPOISON_MEMORY(pointer, size) \
  ASAN_UNPOISON_MEMORY_REGION ((pointer), (size))
#if defined(FUNC_PTRCOMPARE_CAST_WORKAROUND_WORKS)
/**
 * Boolean 'true' if the first pointer is less than or equal to
 * the second pointer
 */
#define mp_ptr_le_(p1,p2) \
  (((uintptr_t)((const void*)(p1))) <= ((uintptr_t)((const void*)(p2))))
/**
 * The difference in bytes between positions of the first and
 * the second pointers
 */
#define mp_ptr_diff_(p1,p2) \
  ((size_t)(((uintptr_t)((const uint8_t*)(p1))) - \
            ((uintptr_t)((const uint8_t*)(p2)))))
#elif defined(FUNC_ATTR_PTRCOMPARE_WORKS) && \
  defined(FUNC_ATTR_PTRSUBTRACT_WORKS)
#ifdef _DEBUG
/**
 * Boolean 'true' if the first pointer is less than or equal to
 * the second pointer
 */
__attribute__((no_sanitize ("pointer-compare"))) static bool
mp_ptr_le_ (const void *p1, const void *p2)
{
  return (((const uint8_t *) p1) <= ((const uint8_t *) p2));
}


#endif /* _DEBUG */


/**
 * The difference in bytes between positions of the first and
 * the second pointers
 */
__attribute__((no_sanitize ("pointer-subtract"))) static size_t
mp_ptr_diff_ (const void *p1, const void *p2)
{
  return (size_t) (((const uint8_t *) p1) - ((const uint8_t *) p2));
}


#elif defined(FUNC_ATTR_NOSANITIZE_WORKS)
#ifdef _DEBUG
/**
 * Boolean 'true' if the first pointer is less than or equal to
 * the second pointer
 */
__attribute__((no_sanitize ("address"))) static bool
mp_ptr_le_ (const void *p1, const void *p2)
{
  return (((const uint8_t *) p1) <= ((const uint8_t *) p2));
}


#endif /* _DEBUG */

/**
 * The difference in bytes between positions of the first and
 * the second pointers
 */
__attribute__((no_sanitize ("address"))) static size_t
mp_ptr_diff_ (const void *p1, const void *p2)
{
  return (size_t) (((const uint8_t *) p1) - ((const uint8_t *) p2));
}


#else  /* ! FUNC_ATTR_NOSANITIZE_WORKS */
#error User-poisoning cannot be used
#endif /* ! FUNC_ATTR_NOSANITIZE_WORKS */
#endif /* MHD_ASAN_POISON_ACTIVE */

/**
 * Size of memory page
 */
static size_t MHD_sys_page_size_ = (size_t)
#if defined(MHD_USE_PAGESIZE_MACRO_STATIC)
                                   PAGESIZE;
#elif defined(MHD_USE_PAGE_SIZE_MACRO_STATIC)
                                   PAGE_SIZE;
#else  /* ! MHD_USE_PAGE_SIZE_MACRO_STATIC */
                                   _MHD_FALLBACK_PAGE_SIZE; /* Default fallback value */
#endif /* ! MHD_USE_PAGE_SIZE_MACRO_STATIC */

/**
 * Initialise values for memory pools
 */
void
MHD_init_mem_pools_ (void)
{
#ifdef MHD_SC_PAGESIZE
  long result;
  result = sysconf (MHD_SC_PAGESIZE);
  if (-1 != result)
    MHD_sys_page_size_ = (size_t) result;
  else
    MHD_sys_page_size_ = (size_t) MHD_DEF_PAGE_SIZE_;
#elif defined(_WIN32)
  SYSTEM_INFO si;
  GetSystemInfo (&si);
  MHD_sys_page_size_ = (size_t) si.dwPageSize;
#else
  MHD_sys_page_size_ = (size_t) MHD_DEF_PAGE_SIZE_;
#endif /* _WIN32 */
  mhd_assert (0 == (MHD_sys_page_size_ % ALIGN_SIZE));
}


/**
 * Handle for a memory pool.  Pools are not reentrant and must not be
 * used by multiple threads.
 */
struct MemoryPool
{

  /**
   * Pointer to the pool's memory
   */
  uint8_t *memory;

  /**
   * Size of the pool.
   */
  size_t size;

  /**
   * Offset of the first unallocated byte.
   */
  size_t pos;

  /**
   * Offset of the byte after the last unallocated byte.
   */
  size_t end;

  /**
   * 'false' if pool was malloc'ed, 'true' if mmapped (VirtualAlloc'ed for W32).
   */
  bool is_mmap;
};
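
/*
 * Layout sketch (added for illustration, not part of the original
 * source): the pool is one contiguous buffer.  "Normal" allocations
 * advance 'pos' from the start, "from end" allocations lower 'end',
 * and the free space is always the single gap [pos, end):
 *
 *   memory
 *   |<- normal blocks ->|<----- free ----->|<- "from end" blocks ->|
 *   0                  pos                end                   size
 */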


/**
 * Create a memory pool.
 *
 * @param max maximum size of the pool
 * @return NULL on error
 */
struct MemoryPool *
MHD_pool_create (size_t max)
{
  struct MemoryPool *pool;
  size_t alloc_size;

  mhd_assert (max > 0);
  alloc_size = 0;
  pool = malloc (sizeof (struct MemoryPool));
  if (NULL == pool)
    return NULL;
#if defined(MAP_ANONYMOUS) || defined(_WIN32)
  if ( (max <= 32 * 1024) ||
       (max < MHD_sys_page_size_ * 4 / 3) )
  {
    pool->memory = MAP_FAILED;
  }
  else
  {
    /* Round up allocation to page granularity. */
    alloc_size = max + MHD_sys_page_size_ - 1;
    alloc_size -= alloc_size % MHD_sys_page_size_;
#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
    pool->memory = mmap (NULL,
                         alloc_size,
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS,
                         -1,
                         0);
#elif defined(_WIN32)
    pool->memory = VirtualAlloc (NULL,
                                 alloc_size,
                                 MEM_COMMIT | MEM_RESERVE,
                                 PAGE_READWRITE);
#endif /* _WIN32 */
  }
#else  /* ! _WIN32 && ! MAP_ANONYMOUS */
  pool->memory = MAP_FAILED;
#endif /* ! _WIN32 && ! MAP_ANONYMOUS */
  if (MAP_FAILED == pool->memory)
  {
    alloc_size = ROUND_TO_ALIGN (max);
    pool->memory = malloc (alloc_size);
    if (NULL == pool->memory)
    {
      free (pool);
      return NULL;
    }
    pool->is_mmap = false;
  }
#if defined(MAP_ANONYMOUS) || defined(_WIN32)
  else
  {
    pool->is_mmap = true;
  }
#endif /* _WIN32 || MAP_ANONYMOUS */
  mhd_assert (0 == (((uintptr_t) pool->memory) % ALIGN_SIZE));
  pool->pos = 0;
  pool->end = alloc_size;
  pool->size = alloc_size;
  mhd_assert (0 < alloc_size);
  _MHD_POISON_MEMORY (pool->memory, pool->size);
  return pool;
}

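/*
 * Usage sketch (added for illustration, not part of the original
 * source; the 64 KiB size is an arbitrary example value).  MHD itself
 * creates one pool per connection; as the code above shows, large
 * pools come from mmap() or VirtualAlloc(), small ones from malloc().
 */
#if 0 /* example only */
static void
example_pool_lifecycle (void)
{
  struct MemoryPool *pool = MHD_pool_create (64 * 1024);
  if (NULL == pool)
    return; /* out of memory */
  /* ... allocate from the pool ... */
  MHD_pool_destroy (pool);
}
#endif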

/**
 * Destroy a memory pool.
 *
 * @param pool memory pool to destroy
 */
void
MHD_pool_destroy (struct MemoryPool *pool)
{
  if (NULL == pool)
    return;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));
  _MHD_UNPOISON_MEMORY (pool->memory, pool->size);
  if (! pool->is_mmap)
    free (pool->memory);
  else
#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
    munmap (pool->memory,
            pool->size);
#elif defined(_WIN32)
    VirtualFree (pool->memory,
                 0,
                 MEM_RELEASE);
#else
    abort ();
#endif
  free (pool);
}


/**
 * Check how much memory is left in the @a pool
 *
 * @param pool pool to check
 * @return number of bytes still available in @a pool
 */
size_t
MHD_pool_get_free (struct MemoryPool *pool)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));
#ifdef MHD_ASAN_POISON_ACTIVE
  if ((pool->end - pool->pos) <= _MHD_RED_ZONE_SIZE)
    return 0;
#endif /* MHD_ASAN_POISON_ACTIVE */
  return (pool->end - pool->pos) - _MHD_RED_ZONE_SIZE;
}


/**
 * Allocate @a size bytes from the pool.
 *
 * @param pool memory pool to use for the operation
 * @param size number of bytes to allocate
 * @param from_end allocate from the end of the pool (set to 'true');
 *        use this for small, persistent allocations that
 *        will never be reallocated
 * @return NULL if the pool cannot support @a size more
 *         bytes
 */
void *
MHD_pool_allocate (struct MemoryPool *pool,
                   size_t size,
                   bool from_end)
{
  void *ret;
  size_t asize;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));
  asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
  if ( (0 == asize) && (0 != size) )
    return NULL; /* size too close to SIZE_MAX */
  if (asize > pool->end - pool->pos)
    return NULL;
  if (from_end)
  {
    ret = &pool->memory[pool->end - asize];
    pool->end -= asize;
  }
  else
  {
    ret = &pool->memory[pool->pos];
    pool->pos += asize;
  }
  _MHD_UNPOISON_MEMORY (ret, size);
  return ret;
}
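
/*
 * Usage sketch (added for illustration, not part of the original
 * source; 'struct example_hdr' is a hypothetical type).  A buffer that
 * may later grow is taken from the front, while a small persistent
 * object is taken "from the end" so it never blocks reallocation of
 * the front block.
 */
#if 0 /* example only */
struct example_hdr { int id; };

static void
example_allocate (struct MemoryPool *pool)
{
  char *buf = MHD_pool_allocate (pool, 1024, false); /* resizable */
  struct example_hdr *hdr =
    MHD_pool_allocate (pool, sizeof (struct example_hdr), true);
  if ( (NULL == buf) || (NULL == hdr) )
    return; /* pool exhausted */
  /* ... */
}
#endif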


/**
 * Checks whether the allocated block is resizable in-place.
 * If the block is not resizable in-place, it can still be shrunk,
 * but the freed memory will not be reused until the pool is reset.
 * @param pool the memory pool to use
 * @param block the pointer to the allocated block to check
 * @param block_size the size of the allocated @a block
 * @return true if the block can be resized in-place in the optimal way,
 *         false otherwise
 */
bool
MHD_pool_is_resizable_inplace (struct MemoryPool *pool,
                               void *block,
                               size_t block_size)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (block != NULL || block_size == 0);
  mhd_assert (pool->size >= block_size);
  if (NULL != block)
  {
    const size_t block_offset = mp_ptr_diff_ (block, pool->memory);
    mhd_assert (mp_ptr_le_ (pool->memory, block));
    mhd_assert (pool->size >= block_offset);
    mhd_assert (pool->size >= block_offset + block_size);
    return (pool->pos ==
            ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size));
  }
  return false; /* Unallocated blocks cannot be resized in-place */
}
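
/*
 * Behaviour sketch (added for illustration, not part of the original
 * source; assumes the pool has room for both blocks).  Only the most
 * recently allocated "normal" block passes the test, since 'pos' must
 * sit exactly at the aligned end of the checked block:
 */
#if 0 /* example only */
static void
example_resizable (struct MemoryPool *pool)
{
  void *a = MHD_pool_allocate (pool, 100, false);
  void *b = MHD_pool_allocate (pool, 100, false);
  /* MHD_pool_is_resizable_inplace (pool, a, 100) == false */
  /* MHD_pool_is_resizable_inplace (pool, b, 100) == true */
  (void) a; (void) b;
}
#endif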


/**
 * Try to allocate a memory area of @a size bytes from the @a pool.
 *
 * If the allocation fails, @a required_bytes is updated with the number
 * of bytes that would need to be freed in the @a pool's relocatable area
 * to allocate the requested number of bytes.
 * The allocated memory area is always non-relocatable ("from the end").
 *
 * @param pool memory pool to use for the operation
 * @param size the size of memory in bytes to allocate
 * @param[out] required_bytes the pointer to a variable to be updated with
 *                            the size of the required additional free
 *                            memory area, set to 0 if the function succeeds.
 *                            Cannot be NULL.
 * @return the pointer to the allocated memory area on success,
 *         NULL if the pool doesn't have enough space; required_bytes is
 *         updated with the amount of space that needs to be freed in the
 *         relocatable area, or set to SIZE_MAX if the requested size is
 *         too large for the pool.
 */
void *
MHD_pool_try_alloc (struct MemoryPool *pool,
                    size_t size,
                    size_t *required_bytes)
{
  void *ret;
  size_t asize;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));
  asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
  if ( (0 == asize) && (0 != size) )
  { /* size is too close to SIZE_MAX, very unlikely */
    *required_bytes = SIZE_MAX;
    return NULL;
  }
  if (asize > pool->end - pool->pos)
  {
    mhd_assert ((pool->end - pool->pos) == \
                ROUND_TO_ALIGN (pool->end - pool->pos));
    if (asize <= pool->end)
      *required_bytes = asize - (pool->end - pool->pos);
    else
      *required_bytes = SIZE_MAX;
    return NULL;
  }
  *required_bytes = 0;
  ret = &pool->memory[pool->end - asize];
  pool->end -= asize;
  _MHD_UNPOISON_MEMORY (ret, size);
  return ret;
}
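
/*
 * Usage sketch (added for illustration, not part of the original
 * source): 'required_bytes' tells the caller whether freeing space in
 * the relocatable area could make a retry succeed, or whether the
 * request can never fit (SIZE_MAX).
 */
#if 0 /* example only */
static void *
example_try_alloc (struct MemoryPool *pool, size_t size)
{
  size_t missing;
  void *ret = MHD_pool_try_alloc (pool, size, &missing);
  if (NULL != ret)
    return ret;
  if (SIZE_MAX == missing)
    return NULL; /* hopeless: larger than the whole pool */
  /* Otherwise: free at least 'missing' bytes of relocatable data
     (e.g. shrink a buffer with MHD_pool_reallocate()), then retry. */
  return NULL;
}
#endif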


/**
 * Reallocate a block of memory obtained from the pool.
 * This is particularly efficient when growing or
 * shrinking the block that was last (re)allocated.
 * If the given block is not the most recently
 * (re)allocated block, the memory of the previous
 * allocation may not be released until the pool is
 * destroyed or reset.
 *
 * @param pool memory pool to use for the operation
 * @param old the existing block
 * @param old_size the size of the existing block
 * @param new_size the new size of the block
 * @return new address of the block, or
 *         NULL if the pool cannot support @a new_size
 *         bytes (@a old continues to be valid for @a old_size)
 */
void *
MHD_pool_reallocate (struct MemoryPool *pool,
                     void *old,
                     size_t old_size,
                     size_t new_size)
{
  size_t asize;
  uint8_t *new_blc;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (old != NULL || old_size == 0);
  mhd_assert (pool->size >= old_size);
  mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));
#if defined(MHD_ASAN_POISON_ACTIVE) && defined(HAVE___ASAN_REGION_IS_POISONED)
  mhd_assert (NULL == __asan_region_is_poisoned (old, old_size));
#endif /* MHD_ASAN_POISON_ACTIVE && HAVE___ASAN_REGION_IS_POISONED */

  if (NULL != old)
  {   /* Have previously allocated data */
    const size_t old_offset = mp_ptr_diff_ (old, pool->memory);
    const bool shrinking = (old_size > new_size);

    mhd_assert (mp_ptr_le_ (pool->memory, old));
    /* (pool->memory + pool->size >= (uint8_t*) old + old_size) */
    mhd_assert ((pool->size - _MHD_RED_ZONE_SIZE) >= (old_offset + old_size));
    /* Blocks "from the end" must not be reallocated */
    /* (old_size == 0 || pool->memory + pool->pos > (uint8_t*) old) */
    mhd_assert ((old_size == 0) || \
                (pool->pos > old_offset));
    mhd_assert ((old_size == 0) || \
                ((pool->end - _MHD_RED_ZONE_SIZE) >= (old_offset + old_size)));
    /* Try resizing in-place */
    if (shrinking)
    {     /* Shrinking in-place, zero-out freed part */
      memset ((uint8_t *) old + new_size, 0, old_size - new_size);
      _MHD_POISON_MEMORY ((uint8_t *) old + new_size, old_size - new_size);
    }
    if (pool->pos ==
        ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + old_size))
    {     /* "old" block is the last allocated block */
      const size_t new_apos =
        ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + new_size);
      if (! shrinking)
      {                               /* Grow in-place, check for enough space. */
        if ( (new_apos > pool->end) ||
             (new_apos < pool->pos) ) /* Value wrap */
          return NULL;                /* No space */
      }
      /* Resized in-place */
      pool->pos = new_apos;
      _MHD_UNPOISON_MEMORY (old, new_size);
      return old;
    }
    if (shrinking)
      return old;   /* Resized in-place, freed part remains allocated */
  }
  /* Need to allocate a new block */
  asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
  if ( ( (0 == asize) &&
         (0 != new_size) ) || /* Value wrap, too large new_size. */
       (asize > pool->end - pool->pos) ) /* Not enough space */
    return NULL;

  new_blc = pool->memory + pool->pos;
  pool->pos += asize;

  _MHD_UNPOISON_MEMORY (new_blc, new_size);
  if (0 != old_size)
  {
    /* Move data to the new block, old block remains allocated */
    memcpy (new_blc, old, old_size);
    /* Zero-out the old block */
    memset (old, 0, old_size);
    _MHD_POISON_MEMORY (old, old_size);
  }
  return new_blc;
}
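
/*
 * Behaviour sketch (added for illustration, not part of the original
 * source; assumes the pool is large enough).  Growing the most recent
 * block happens in-place; growing an older block copies the data and
 * strands the old region until the pool is reset or destroyed:
 */
#if 0 /* example only */
static void
example_reallocate (struct MemoryPool *pool)
{
  char *a = MHD_pool_allocate (pool, 64, false);
  char *b = MHD_pool_reallocate (pool, a, 64, 128);  /* b == a, in-place */
  char *c = MHD_pool_allocate (pool, 16, false);     /* now the last block */
  char *d = MHD_pool_reallocate (pool, b, 128, 256); /* d != b, data moved */
  (void) c; (void) d;
}
#endif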


/**
 * Deallocate a block of memory obtained from the pool.
 *
 * If the given block is not the most recently
 * (re)allocated block, the memory of this block
 * may not be released until the pool is
 * destroyed or reset.
 *
 * @param pool memory pool to use for the operation
 * @param block the allocated block; NULL is tolerated
 * @param block_size the size of the allocated block
 */
void
MHD_pool_deallocate (struct MemoryPool *pool,
                     void *block,
                     size_t block_size)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (block != NULL || block_size == 0);
  mhd_assert (pool->size >= block_size);
  mhd_assert (pool->pos == ROUND_TO_ALIGN (pool->pos));

  if (NULL != block)
  {   /* Have previously allocated data */
    const size_t block_offset = mp_ptr_diff_ (block, pool->memory);
    mhd_assert (mp_ptr_le_ (pool->memory, block));
    mhd_assert (block_offset <= pool->size);
    mhd_assert ((block_offset != pool->pos) || (block_size == 0));
    /* Zero-out deallocated region */
    if (0 != block_size)
    {
      memset (block, 0, block_size);
      _MHD_POISON_MEMORY (block, block_size);
    }
#if ! defined(MHD_FAVOR_SMALL_CODE) && ! defined(MHD_ASAN_POISON_ACTIVE)
    else
      return; /* Zero size, no need to do anything */
#endif /* ! MHD_FAVOR_SMALL_CODE && ! MHD_ASAN_POISON_ACTIVE */
    if (block_offset <= pool->pos)
    {
      /* "Normal" block, not allocated "from the end". */
      const size_t alg_end =
        ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size);
      mhd_assert (alg_end <= pool->pos);
      if (alg_end == pool->pos)
      {
        /* The last allocated block, return deallocated block to the pool */
        size_t alg_start = ROUND_TO_ALIGN (block_offset);
        mhd_assert (alg_start >= block_offset);
#if defined(MHD_ASAN_POISON_ACTIVE)
        if (alg_start != block_offset)
        {
          _MHD_POISON_MEMORY (pool->memory + block_offset, \
                              alg_start - block_offset);
        }
        else if (0 != alg_start)
        {
          bool need_red_zone_before;
          mhd_assert (_MHD_RED_ZONE_SIZE <= alg_start);
#if defined(HAVE___ASAN_REGION_IS_POISONED)
          need_red_zone_before =
            (NULL == __asan_region_is_poisoned (pool->memory
                                                + alg_start
                                                - _MHD_RED_ZONE_SIZE,
                                                _MHD_RED_ZONE_SIZE));
#elif defined(HAVE___ASAN_ADDRESS_IS_POISONED)
          need_red_zone_before =
            (0 == __asan_address_is_poisoned (pool->memory + alg_start - 1));
#else  /* ! HAVE___ASAN_ADDRESS_IS_POISONED */
          need_red_zone_before = true; /* Unknown, assume new red zone needed */
#endif /* ! HAVE___ASAN_ADDRESS_IS_POISONED */
          if (need_red_zone_before)
          {
            _MHD_POISON_MEMORY (pool->memory + alg_start, _MHD_RED_ZONE_SIZE);
            alg_start += _MHD_RED_ZONE_SIZE;
          }
        }
#endif /* MHD_ASAN_POISON_ACTIVE */
        mhd_assert (alg_start <= pool->pos);
        mhd_assert (alg_start == ROUND_TO_ALIGN (alg_start));
        pool->pos = alg_start;
      }
    }
    else
    {
      /* Block allocated "from the end". */
      /* The size and the pointers of such a block should not be manipulated
         by MHD code (block splitting is disallowed). */
      mhd_assert (block_offset >= pool->end);
      mhd_assert (ROUND_TO_ALIGN (block_offset) == block_offset);
      if (block_offset == pool->end)
      {
        /* The last allocated block, return deallocated block to the pool */
        const size_t alg_end =
          ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size);
        pool->end = alg_end;
      }
    }
  }
}
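
/*
 * Behaviour sketch (added for illustration, not part of the original
 * source): deallocation is effectively LIFO.  Any block is zeroed, but
 * only the most recently allocated block actually returns its space to
 * the free gap:
 */
#if 0 /* example only */
static void
example_deallocate (struct MemoryPool *pool)
{
  void *a = MHD_pool_allocate (pool, 32, false);
  void *b = MHD_pool_allocate (pool, 32, false);
  MHD_pool_deallocate (pool, a, 32); /* zeroed, space still in use */
  MHD_pool_deallocate (pool, b, 32); /* last block: 'pos' moves back */
}
#endif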


/**
 * Clear all entries from the memory pool except
 * for @a keep of the given @a copy_bytes.  The pointer
 * returned should be a buffer of @a new_size where
 * the first @a copy_bytes are from @a keep.
 *
 * @param pool memory pool to use for the operation
 * @param keep pointer to the entry to keep (may be NULL)
 * @param copy_bytes how many bytes need to be kept at this address
 * @param new_size how many bytes should the allocation we return have?
 *                 (should be greater than or equal to @a copy_bytes)
 * @return the new address of @a keep (if it had to change)
 */
void *
MHD_pool_reset (struct MemoryPool *pool,
                void *keep,
                size_t copy_bytes,
                size_t new_size)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (copy_bytes <= new_size);
  mhd_assert (copy_bytes <= pool->size);
  mhd_assert (keep != NULL || copy_bytes == 0);
  mhd_assert (keep == NULL || mp_ptr_le_ (pool->memory, keep));
  /* (keep == NULL || pool->memory + pool->size >= (uint8_t*) keep + copy_bytes) */
  mhd_assert ((keep == NULL) || \
              (pool->size >= mp_ptr_diff_ (keep, pool->memory) + copy_bytes));
#if defined(MHD_ASAN_POISON_ACTIVE) && defined(HAVE___ASAN_REGION_IS_POISONED)
  mhd_assert (NULL == __asan_region_is_poisoned (keep, copy_bytes));
#endif /* MHD_ASAN_POISON_ACTIVE && HAVE___ASAN_REGION_IS_POISONED */
  _MHD_UNPOISON_MEMORY (pool->memory, new_size);
  if ( (NULL != keep) &&
       (keep != pool->memory) )
  {
    if (0 != copy_bytes)
      memmove (pool->memory,
               keep,
               copy_bytes);
  }
  /* technically not needed, but safer to zero out */
  if (pool->size > copy_bytes)
  {
    size_t to_zero;   /** Size of area to zero-out */

    to_zero = pool->size - copy_bytes;
    _MHD_UNPOISON_MEMORY (pool->memory + copy_bytes, to_zero);
#ifdef _WIN32
    if (pool->is_mmap)
    {
      size_t to_recommit;     /** Size of decommitted and re-committed area. */
      uint8_t *recommit_addr;
      /* Round down to page size */
      to_recommit = to_zero - to_zero % MHD_sys_page_size_;
      recommit_addr = pool->memory + pool->size - to_recommit;

      /* De-committing and re-committing again clears the memory and makes
       * the pages free / available for other needs until accessed. */
      if (VirtualFree (recommit_addr,
                       to_recommit,
                       MEM_DECOMMIT))
      {
        to_zero -= to_recommit;

        if (recommit_addr != VirtualAlloc (recommit_addr,
                                           to_recommit,
                                           MEM_COMMIT,
                                           PAGE_READWRITE))
          abort ();      /* Serious error, must never happen */
      }
    }
#endif /* _WIN32 */
    memset (&pool->memory[copy_bytes],
            0,
            to_zero);
  }
  pool->pos = ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
  pool->end = pool->size;
  _MHD_POISON_MEMORY (((uint8_t *) pool->memory) + new_size, \
                      pool->size - new_size);
  return pool->memory;
}

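/*
 * Usage sketch (added for illustration, not part of the original
 * source).  MHD resets the pool between requests on a kept-alive
 * connection; everything is wiped except the region to keep, which is
 * moved to the start of the pool and must be accessed through the
 * returned pointer afterwards.
 */
#if 0 /* example only */
static void *
example_reset (struct MemoryPool *pool, void *keep, size_t keep_size)
{
  return MHD_pool_reset (pool, keep, keep_size, keep_size);
}
#endif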

/* end of memorypool.c */