commit 427f40095ca485f1acad59c5d50ab26fe93da723
parent f1579ec8008ba0ecd23fb15b50a5bd7c31914e0f
Author: Evgeny Grin (Karlson2k) <k2k@drgrin.dev>
Date: Fri, 6 Jun 2025 00:31:08 +0200
Made zeroing of memory pool optional, other memory pool improvements
Added new function attributes, useful for the memory pool.
Added new daemon settings to control memory pool zeroing.
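
A minimal usage sketch (illustrative only; the exact calling convention of
MHD_daemon_set_options() is assumed here, only the option macro and the enum
values are introduced by this patch):

    /* Request wiping of the connection memory buffer before
     * processing of every request */
    struct MHD_DaemonOptionAndValue opt =
      MHD_D_OPTION_CONN_BUFF_ZEROING (MHD_CONN_BUFFER_ZEROING_BASIC);

    MHD_daemon_set_options (daemon, &opt, 1);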
Diffstat:
20 files changed, 1361 insertions(+), 1019 deletions(-)
diff --git a/configure.ac b/configure.ac
@@ -1312,7 +1312,7 @@ MHD_CHECK_CC_CFLAG([-Werror=attributes],[CFLAGS_ac],
]
)
AS_VAR_IF([mhd_cv_cflag_werror_attr_works],["yes"],
- [errattr_CFLAGS="-Werror=attributes"]
+ [errattr_CFLAGS="-Werror=attributes"],[errattr_CFLAGS=""]
)
]
)
@@ -4427,6 +4427,149 @@ AS_VAR_IF([mhd_cv_cc_attr_ret_nonnull],["yes"],
)
]
)
+AC_CACHE_CHECK([whether $CC supports __attribute__ ((assume_aligned (N)))],[mhd_cv_cc_attr_func_assume_aligned],
+ [
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
+static __attribute__((assume_aligned (1))) void*
+test_func(void) {
+ static signed char c = 0;
+ return (void*) &c;
+}
+
+int main(void) {
+ return test_func() ? 0 : 1;
+}
+ ]])
+ ],
+ [mhd_cv_cc_attr_func_assume_aligned="yes"],[mhd_cv_cc_attr_func_assume_aligned="no"]
+ )
+ ]
+)
+AS_VAR_IF([mhd_cv_cc_attr_func_assume_aligned],["yes"],
+ [
+ AC_DEFINE([HAVE_ATTR_FUNC_ASSUME_ALIGNED],[1],
+ [Define to '1' if your compiler supports __attribute__((assume_aligned(N)))]
+ )
+ ]
+)
+AC_CACHE_CHECK([whether $CC supports __attribute__ ((alloc_size (N)))],[mhd_cv_cc_attr_func_alloc_size],
+ [
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
+#if defined(HAVE_STDDEF_H)
+# include <stddef.h> /* NULL */
+#else
+# include <string.h> /* should provide NULL */
+#endif
+
+static __attribute__((alloc_size(1))) void*
+test_alloc(unsigned short size) {
+ static char buf_alloc[8] = {0, 0, 0, 0, 0, 0, 0, 0};
+ static unsigned short allocated = 0;
+ void *ret;
+ if (sizeof(buf_alloc) - allocated < size)
+ return NULL;
+ ret = (void*) (buf_alloc + allocated);
+  allocated += size;
+ return ret;
+}
+
+int main(void) {
+ return test_alloc(1) ? 0 : 1;
+}
+ ]])
+ ],
+ [mhd_cv_cc_attr_func_alloc_size="yes"],[mhd_cv_cc_attr_func_alloc_size="no"]
+ )
+ ]
+)
+AS_VAR_IF([mhd_cv_cc_attr_func_alloc_size],["yes"],
+ [
+ AC_DEFINE([HAVE_ATTR_FUNC_ALLOC_SIZE],[1],
+ [Define to '1' if your compiler supports __attribute__((alloc_size(N)))]
+ )
+ ]
+)
+AC_CACHE_CHECK([whether $CC supports __attribute__ ((malloc))],[mhd_cv_cc_attr_func_malloc],
+ [
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
+#if defined(HAVE_STDDEF_H)
+# include <stddef.h> /* NULL */
+#else
+# include <string.h> /* should provide NULL */
+#endif
+
+static __attribute__((malloc)) void*
+test_alloc(void) {
+ static char buf_alloc[8] = {0, 0, 0, 0, 0, 0, 0, 0};
+ static int used_flag = 0;
+ if (used_flag)
+ return NULL;
+ used_flag = !0;
+ return buf_alloc;
+}
+
+int main(void) {
+ return test_alloc() ? 0 : 1;
+}
+ ]])
+ ],
+ [mhd_cv_cc_attr_func_malloc="yes"],[mhd_cv_cc_attr_func_malloc="no"]
+ )
+ ]
+)
+AS_VAR_IF([mhd_cv_cc_attr_func_malloc],["yes"],
+ [
+ AC_DEFINE([HAVE_ATTR_FUNC_MALLOC],[1],
+ [Define to '1' if your compiler supports __attribute__((malloc))]
+ )
+ ]
+)
+AC_CACHE_CHECK([whether $CC supports __attribute__ ((malloc(deallocator)))],[mhd_cv_cc_attr_func_malloc_dealloc],
+ [
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
+#if defined(HAVE_STDDEF_H)
+# include <stddef.h> /* NULL */
+#else
+# include <string.h> /* should provide NULL */
+#endif
+
+static int used_flag = 0;
+static char buf_alloc[8] = {0, 0, 0, 0, 0, 0, 0, 0};
+
+static void
+test_dealloc(void *ptr) {
+ if (! used_flag)
+ return;
+ if (ptr != (void*) buf_alloc)
+ return;
+ used_flag = 0;
+}
+
+static __attribute__((malloc(test_dealloc))) void*
+test_alloc(void) {
+ if (used_flag)
+ return NULL;
+ used_flag = !0;
+ return (void*) buf_alloc;
+}
+
+int main(void) {
+ test_dealloc(test_alloc());
+ return 0;
+}
+ ]])
+ ],
+ [mhd_cv_cc_attr_func_malloc_dealloc="yes"],[mhd_cv_cc_attr_func_malloc_dealloc="no"]
+ )
+ ]
+)
+AS_VAR_IF([mhd_cv_cc_attr_func_malloc_dealloc],["yes"],
+ [
+ AC_DEFINE([HAVE_ATTR_FUNC_MALLOC_DEALLOC],[1],
+ [Define to '1' if your compiler supports __attribute__((malloc(deallocator)))]
+ )
+ ]
+)
AC_CACHE_CHECK([whether $CC supports __attribute__ ((nonnull))],[mhd_cv_cc_attr_nonnull],
[
AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
diff --git a/src/incl_priv/mhd_sys_options.h b/src/incl_priv/mhd_sys_options.h
@@ -35,9 +35,9 @@
#define MHD_SYS_OPTIONS_H 1
#ifndef HAVE_CONFIG_H
-#ifndef _MSC_VER
+# ifndef _MSC_VER
#error HAVE_CONFIG_H must be defined
-#endif
+# endif
#endif
#include "mhd_config.h"
@@ -220,6 +220,55 @@
# define MHD_FN_PAR_DYN_ARR_SIZE_(size) 1
#endif
+#ifdef HAVE_ATTR_FUNC_ASSUME_ALIGNED
+# define mhd_FN_RET_ALIGNED(align) __attribute__((assume_aligned(align)))
+#else
+# define mhd_FN_RET_ALIGNED(align) /* empty */
+#endif
+
+#ifdef HAVE_ATTR_FUNC_ALLOC_SIZE
+/**
+ * Indicates that returned pointer points to object with the size specified
+ * by parameter number @a param_num.
+ */
+# define mhd_FN_RET_SIZED(param_num) __attribute__((alloc_size(param_num)))
+#else
+/**
+ * Indicates that returned pointer points to object with the size specified
+ * by parameter number @a param_num.
+ */
+# define mhd_FN_RET_SIZED(param_num) /* empty */
+#endif
+
+#ifdef HAVE_ATTR_FUNC_MALLOC
+/**
+ * Indicates that returned pointer is unique and does not alias any other
+ * pointers.
+ */
+# define mhd_FN_RET_UNALIASED __attribute__((malloc))
+#else
+/**
+ * Indicates that returned pointer is unique and does not alias any other
+ * pointers.
+ */
+# define mhd_FN_RET_UNALIASED /* empty */
+#endif
+
+#ifdef HAVE_ATTR_FUNC_MALLOC_DEALLOC
+/**
+ * Indicates that the function creates an object that must be destroyed by
+ * the function @a destructor.
+ */
+# define mhd_FN_OBJ_CONSTRUCTOR(destructor) \
+ __attribute__((malloc(destructor)))
+#else
+/**
+ * Indicates that the function creates an object that must be destroyed by
+ * the function @a destructor.
+ */
+# define mhd_FN_OBJ_CONSTRUCTOR(destructor) /* empty */
+#endif
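+
+/* Example: as used for the pool functions later in this patch, these
+ * macros combine into declarations like
+ *   MHD_INTERNAL void *
+ *   mhd_pool_allocate (struct mhd_MemoryPool *restrict pool,
+ *                      size_t size,
+ *                      bool from_end)
+ *   mhd_FN_RET_UNALIASED mhd_FN_RET_SIZED(2)
+ *   mhd_FN_RET_ALIGNED(mhd_MEMPOOL_ALIGN_SIZE);
+ */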
+
#ifdef HAVE_ATTR_ENUM_EXTNS_CLOSED
# define MHD_FIXED_ENUM_ __attribute__((enum_extensibility (closed)))
#else
diff --git a/src/include/d_options.rec b/src/include/d_options.rec
@@ -290,6 +290,12 @@ Description1: the accept policy callback
Argument2: void *apc_cls
Description2: the closure for the callback
+Name: CONN_BUFF_ZEROING
+Value: 164
+Comment: Set mode of connection memory buffer zeroing
+Argument1: enum MHD_ConnBufferZeroingMode buff_zeroing
+Description1: buffer zeroing mode
+
# Requests processing
Name: protocol_strict_level
diff --git a/src/include/microhttpd2.h b/src/include/microhttpd2.h
@@ -3535,6 +3535,32 @@ enum MHD_FIXED_ENUM_APP_SET_ MHD_UseStictLevel
MHD_USL_NEAREST = 2
};
+
+/**
+ * Connection memory buffer zeroing mode.
+ * Works as a hardening measure.
+ */
+enum MHD_FIXED_ENUM_APP_SET_ MHD_ConnBufferZeroingMode
+{
+ /**
+ * Do not perform zeroing of connection memory buffer.
+ * Default mode.
+ */
+ MHD_CONN_BUFFER_ZEROING_DISABLED = 0
+ ,
+ /**
+ * Perform connection memory buffer zeroing before processing request.
+ */
+ MHD_CONN_BUFFER_ZEROING_BASIC = 1
+ ,
+ /**
+ * Perform connection memory buffer zeroing before processing request and
+ * when reusing buffer memory areas during processing request.
+ */
+ MHD_CONN_BUFFER_ZEROING_HEAVY = 2
+};
+
+
/* ********************** (d) TLS support ********************** */
/**
@@ -4338,6 +4364,16 @@ MHD_D_OPTION_ACCEPT_POLICY (
);
/**
+ * Set mode of connection memory buffer zeroing
+ * @param buff_zeroing buffer zeroing mode
+ * @return structure with the requested setting
+ */
+struct MHD_DaemonOptionAndValue
+MHD_D_OPTION_CONN_BUFF_ZEROING (
+ enum MHD_ConnBufferZeroingMode buff_zeroing
+ );
+
+/**
* Set how strictly MHD will enforce the HTTP protocol.
* @param sl the level of strictness
* @param how the way how to use the requested level
diff --git a/src/include/microhttpd2_generated_daemon_options.h b/src/include/microhttpd2_generated_daemon_options.h
@@ -181,6 +181,12 @@ Works only when #MHD_D_OPTION_BIND_PORT() or #MHD_D_OPTION_BIND_SA() are used.
,
/**
+ * Set mode of connection memory buffer zeroing
+ */
+ MHD_D_O_CONN_BUFF_ZEROING = 164
+ ,
+
+ /**
* Set how strictly MHD will enforce the HTTP protocol.
*/
MHD_D_O_PROTOCOL_STRICT_LEVEL = 200
@@ -733,6 +739,12 @@ union MHD_DaemonOptionValue
struct MHD_DaemonOptionValueAcceptPol accept_policy;
/**
+ * Value for #MHD_D_O_CONN_BUFF_ZEROING.
+ * buffer zeroing mode
+ */
+ enum MHD_ConnBufferZeroingMode conn_buff_zeroing;
+
+ /**
* Value for #MHD_D_O_PROTOCOL_STRICT_LEVEL.
* the level of strictness
*/
@@ -1199,6 +1211,19 @@ Works only when #MHD_D_OPTION_BIND_PORT() or #MHD_D_OPTION_BIND_SA() are used.
} \
MHD_RESTORE_WARN_COMPOUND_LITERALS_ MHD_RESTORE_WARN_AGGR_DYN_INIT_
/**
+ * Set mode of connection memory buffer zeroing
+ * @param buff_zeroing buffer zeroing mode
+ * @return structure with the requested setting
+ */
+# define MHD_D_OPTION_CONN_BUFF_ZEROING(buff_zeroing) \
+ MHD_NOWARN_COMPOUND_LITERALS_ MHD_NOWARN_AGGR_DYN_INIT_ \
+ (const struct MHD_DaemonOptionAndValue) \
+ { \
+ .opt = MHD_D_O_CONN_BUFF_ZEROING, \
+ .val.conn_buff_zeroing = (buff_zeroing) \
+ } \
+ MHD_RESTORE_WARN_COMPOUND_LITERALS_ MHD_RESTORE_WARN_AGGR_DYN_INIT_
+/**
* Set how strictly MHD will enforce the HTTP protocol.
* @param sl the level of strictness
* @param how the way how to use the requested level
@@ -2001,6 +2026,25 @@ MHD_D_OPTION_ACCEPT_POLICY (
/**
+ * Set mode of connection memory buffer zeroing
+ * @param buff_zeroing buffer zeroing mode
+ * @return structure with the requested setting
+ */
+static MHD_INLINE struct MHD_DaemonOptionAndValue
+MHD_D_OPTION_CONN_BUFF_ZEROING (
+ enum MHD_ConnBufferZeroingMode buff_zeroing
+ )
+{
+ struct MHD_DaemonOptionAndValue opt_val;
+
+ opt_val.opt = MHD_D_O_CONN_BUFF_ZEROING;
+ opt_val.val.conn_buff_zeroing = buff_zeroing;
+
+ return opt_val;
+}
+
+
+/**
* Set how strictly MHD will enforce the HTTP protocol.
* @param sl the level of strictness
* @param how the way how to use the requested level
diff --git a/src/include/microhttpd2_preamble.h.in b/src/include/microhttpd2_preamble.h.in
@@ -3535,6 +3535,32 @@ enum MHD_FIXED_ENUM_APP_SET_ MHD_UseStictLevel
MHD_USL_NEAREST = 2
};
+
+/**
+ * Connection memory buffer zeroing mode.
+ * Works as a hardening measure.
+ */
+enum MHD_FIXED_ENUM_APP_SET_ MHD_ConnBufferZeroingMode
+{
+ /**
+ * Do not perform zeroing of connection memory buffer.
+ * Default mode.
+ */
+ MHD_CONN_BUFFER_ZEROING_DISABLED = 0
+ ,
+ /**
+ * Perform connection memory buffer zeroing before processing request.
+ */
+ MHD_CONN_BUFFER_ZEROING_BASIC = 1
+ ,
+ /**
+ * Perform connection memory buffer zeroing before processing request and
+ * when reusing buffer memory areas during processing request.
+ */
+ MHD_CONN_BUFFER_ZEROING_HEAVY = 2
+};
+
+
/* ********************** (d) TLS support ********************** */
/**
diff --git a/src/mhd2/Makefile.am b/src/mhd2/Makefile.am
@@ -58,7 +58,7 @@ libmicrohttpd2_la_SOURCES = \
mhd_itc.c mhd_itc.h mhd_itc_types.h \
mhd_threads.c mhd_threads.h sys_thread_entry_type.h \
mhd_mono_clock.c mhd_mono_clock.h \
- mhd_mempool.c mhd_mempool.h \
+ mempool_funcs.c mempool_funcs.h mempool_types.h \
mhd_read_file.c mhd_read_file.h \
mhd_recv.c mhd_recv.h \
mhd_send.c mhd_send.h \
diff --git a/src/mhd2/daemon_add_conn.c b/src/mhd2/daemon_add_conn.c
@@ -63,7 +63,7 @@
#include "daemon_logger.h"
#include "mhd_mono_clock.h"
-#include "mhd_mempool.h"
+#include "mempool_funcs.h"
#include "events_process.h"
#include "response_from.h"
@@ -343,7 +343,8 @@ new_connection_process_inner (struct MHD_Daemon *restrict daemon,
* intensively used memory area is allocated in "good"
* (for the thread) memory region. It is important with
* NUMA and/or complex cache hierarchy. */
- connection->pool = mdh_pool_create (daemon->conns.cfg.mem_pool_size);
+ connection->pool = mhd_pool_create (daemon->conns.cfg.mem_pool_size,
+ daemon->conns.cfg.mem_pool_zeroing);
if (NULL == connection->pool)
{ /* 'pool' creation failed */
mhd_LOG_MSG (daemon, MHD_SC_POOL_MEM_ALLOC_FAILURE, \
diff --git a/src/mhd2/daemon_options.h b/src/mhd2/daemon_options.h
@@ -158,6 +158,13 @@ struct DaemonOptions
/**
+ * Value for #MHD_D_O_CONN_BUFF_ZEROING.
+ * buffer zeroing mode
+ */
+ enum MHD_ConnBufferZeroingMode conn_buff_zeroing;
+
+
+ /**
* Value for #MHD_D_O_PROTOCOL_STRICT_LEVEL.
* the level of strictness
*/
diff --git a/src/mhd2/daemon_set_options.c b/src/mhd2/daemon_set_options.c
@@ -163,6 +163,9 @@ MHD_daemon_set_options (
settings->accept_policy.v_apc = option->val.accept_policy.v_apc;
settings->accept_policy.v_apc_cls = option->val.accept_policy.v_apc_cls;
continue;
+ case MHD_D_O_CONN_BUFF_ZEROING:
+ settings->conn_buff_zeroing = option->val.conn_buff_zeroing;
+ continue;
case MHD_D_O_PROTOCOL_STRICT_LEVEL:
settings->protocol_strict_level.v_sl = option->val.protocol_strict_level.v_sl;
settings->protocol_strict_level.v_how = option->val.protocol_strict_level.v_how;
diff --git a/src/mhd2/daemon_start.c b/src/mhd2/daemon_start.c
@@ -2426,6 +2426,20 @@ init_individual_conns (struct MHD_Daemon *restrict d,
else if (256 > d->conns.cfg.mem_pool_size)
d->conns.cfg.mem_pool_size = 256;
+ switch (s->conn_buff_zeroing)
+ {
+ case MHD_CONN_BUFFER_ZEROING_DISABLED:
+ d->conns.cfg.mem_pool_zeroing = MHD_MEMPOOL_ZEROING_NEVER;
+ break;
+ case MHD_CONN_BUFFER_ZEROING_BASIC:
+ d->conns.cfg.mem_pool_zeroing = MHD_MEMPOOL_ZEROING_ON_RESET;
+ break;
+ case MHD_CONN_BUFFER_ZEROING_HEAVY:
+ default:
+ d->conns.cfg.mem_pool_zeroing = MHD_MEMPOOL_ZEROING_ALWAYS;
+ break;
+ }
+
#ifdef MHD_SUPPORT_UPGRADE
mhd_DLINKEDL_INIT_LIST (&(d->conns.upgr),upgr_cleanup);
if (! mhd_mutex_init (&(d->conns.upgr.ucu_lock)))
diff --git a/src/mhd2/mempool_funcs.c b/src/mhd2/mempool_funcs.c
@@ -0,0 +1,744 @@
+/*
+ This file is part of libmicrohttpd
+ Copyright (C) 2007--2024 Daniel Pittman and Christian Grothoff
+ Copyright (C) 2014--2024 Evgeny Grin (Karlson2k)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+/**
+ * @file src/mhd2/mempool_funcs.c
+ * @brief memory pool
+ * @author Christian Grothoff
+ * @author Karlson2k (Evgeny Grin)
+ * TODO:
+ * + Update code style
+ * + Detect mmap() in configure (it is purely optional!)
+ */
+#include "mhd_sys_options.h"
+#include "mempool_funcs.h"
+#include "compat_calloc.h"
+
+#ifdef HAVE_STDLIB_H
+# include <stdlib.h>
+#endif /* HAVE_STDLIB_H */
+#include <string.h>
+#include "mhd_assert.h"
+#ifdef HAVE_SYS_MMAN_H
+# include <sys/mman.h>
+#endif
+#ifdef _WIN32
+# include <windows.h>
+#endif
+#ifdef HAVE_SYSCONF
+# include <unistd.h>
+# if defined(_SC_PAGE_SIZE)
+# define MHD_SC_PAGESIZE _SC_PAGE_SIZE
+# elif defined(_SC_PAGESIZE)
+# define MHD_SC_PAGESIZE _SC_PAGESIZE
+# endif /* _SC_PAGESIZE */
+#endif /* HAVE_SYSCONF */
+
+#if defined(MHD_USE_PAGESIZE_MACRO) || defined(MHD_USE_PAGE_SIZE_MACRO)
+# ifndef HAVE_SYSCONF /* Avoid duplicate include */
+# include <unistd.h>
+# endif /* HAVE_SYSCONF */
+# ifdef HAVE_LIMITS_H
+# include <limits.h>
+# endif
+# ifdef HAVE_SYS_PARAM_H
+# include <sys/param.h>
+# endif /* HAVE_SYS_PARAM_H */
+#endif /* MHD_USE_PAGESIZE_MACRO || MHD_USE_PAGE_SIZE_MACRO */
+
+#include "mhd_limits.h"
+
+#ifndef mhd_FALLBACK_PAGE_SIZE
+/**
+ * Fallback value of page size
+ */
+# define mhd_FALLBACK_PAGE_SIZE (4096)
+#endif
+
+#if defined(MHD_USE_PAGESIZE_MACRO)
+# define mhd_DEF_PAGE_SIZE PAGESIZE
+#elif defined(MHD_USE_PAGE_SIZE_MACRO)
+# define mhd_DEF_PAGE_SIZE PAGE_SIZE
+#else /* ! PAGESIZE */
+# define mhd_DEF_PAGE_SIZE mhd_FALLBACK_PAGE_SIZE
+#endif /* ! PAGESIZE */
+
+
+#ifdef MHD_ASAN_POISON_ACTIVE
+#include <sanitizer/asan_interface.h>
+#endif /* MHD_ASAN_POISON_ACTIVE */
+
+#if defined(MAP_ANONYMOUS)
+# define mhd_MAP_ANONYMOUS MAP_ANONYMOUS
+#endif
+
+#if ! defined(mhd_MAP_ANONYMOUS) && defined(MAP_ANON)
+# define mhd_MAP_ANONYMOUS MAP_ANON
+#endif
+
+#if defined(mhd_MAP_ANONYMOUS) || defined(_WIN32)
+# define mhd_USE_LARGE_ALLOCS 1
+#endif
+
+#ifdef mhd_USE_LARGE_ALLOCS
+# if defined(_WIN32)
+# define mhd_MAP_FAILED NULL
+# elif defined(MAP_FAILED)
+# define mhd_MAP_FAILED MAP_FAILED
+# else
+# define mhd_MAP_FAILED ((void*) -1)
+# endif
+#endif
+
+/**
+ * Round up 'n' to a multiple of ALIGN_SIZE.
+ */
+#define mhd_ROUND_TO_ALIGN(n) \
+ (((n) + (mhd_MEMPOOL_ALIGN_SIZE - 1)) \
+ / (mhd_MEMPOOL_ALIGN_SIZE) *(mhd_MEMPOOL_ALIGN_SIZE))
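+
+/* For example, with 8-byte pointers mhd_MEMPOOL_ALIGN_SIZE is 16, so
+ * mhd_ROUND_TO_ALIGN(1) == 16, mhd_ROUND_TO_ALIGN(16) == 16 and
+ * mhd_ROUND_TO_ALIGN(17) == 32. */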
+
+
+#ifndef MHD_ASAN_POISON_ACTIVE
+# define mhd_NOSANITIZE_PTRS /**/
+# define mhd_RED_ZONE_SIZE (0)
+# define mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE(n) mhd_ROUND_TO_ALIGN (n)
+# define mhd_POISON_MEMORY(pointer, size) (void) 0
+# define mhd_UNPOISON_MEMORY(pointer, size) (void) 0
+/**
+ * Boolean 'true' if the first pointer is less or equal the second pointer
+ */
+# define mp_ptr_le_(p1,p2) \
+ (((const uint8_t*) (p1)) <= ((const uint8_t*) (p2)))
+/**
+ * The difference in bytes between positions of the first and
+ * the second pointers
+ */
+# define mp_ptr_diff_(p1,p2) \
+ ((size_t) (((const uint8_t*) (p1)) - ((const uint8_t*) (p2))))
+#else /* MHD_ASAN_POISON_ACTIVE */
+# define mhd_RED_ZONE_SIZE (mhd_MEMPOOL_ALIGN_SIZE)
+# define mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE(n) \
+ (mhd_ROUND_TO_ALIGN (n) + mhd_RED_ZONE_SIZE)
+# define mhd_POISON_MEMORY(pointer, size) \
+ ASAN_POISON_MEMORY_REGION ((pointer), (size))
+# define mhd_UNPOISON_MEMORY(pointer, size) \
+ ASAN_UNPOISON_MEMORY_REGION ((pointer), (size))
+# if defined(FUNC_PTRCOMPARE_CAST_WORKAROUND_WORKS)
+/**
+ * Boolean 'true' if the first pointer is less or equal the second pointer
+ */
+# define mp_ptr_le_(p1,p2) \
+ (((uintptr_t) ((const void*) (p1))) <= \
+ ((uintptr_t) ((const void*) (p2))))
+/**
+ * The difference in bytes between positions of the first and
+ * the second pointers
+ */
+# define mp_ptr_diff_(p1,p2) \
+ ((size_t) (((uintptr_t) ((const uint8_t*) (p1))) - \
+ ((uintptr_t) ((const uint8_t*) (p2)))))
+#elif defined(FUNC_ATTR_PTRCOMPARE_WORKS) && \
+ defined(FUNC_ATTR_PTRSUBTRACT_WORKS)
+# ifndef NDEBUG
+/**
+ * Boolean 'true' if the first pointer is less or equal the second pointer
+ */
+__attribute__((no_sanitize ("pointer-compare"))) static bool
+mp_ptr_le_ (const void *p1, const void *p2)
+{
+ return (((const uint8_t *) p1) <= ((const uint8_t *) p2));
+}
+
+
+# endif /* NDEBUG */
+
+
+/**
+ * The difference in bytes between positions of the first and
+ * the second pointers
+ */
+__attribute__((no_sanitize ("pointer-subtract"))) static size_t
+mp_ptr_diff_ (const void *p1, const void *p2)
+{
+ return (size_t) (((const uint8_t *) p1) - ((const uint8_t *) p2));
+}
+
+
+# elif defined(FUNC_ATTR_NOSANITIZE_WORKS)
+# ifndef NDEBUG
+/**
+ * Boolean 'true' if the first pointer is less or equal the second pointer
+ */
+__attribute__((no_sanitize ("address"))) static bool
+mp_ptr_le_ (const void *p1, const void *p2)
+{
+ return (((const uint8_t *) p1) <= ((const uint8_t *) p2));
+}
+
+
+# endif /* NDEBUG */
+
+/**
+ * The difference in bytes between positions of the first and
+ * the second pointers
+ */
+__attribute__((no_sanitize ("address"))) static size_t
+mp_ptr_diff_ (const void *p1, const void *p2)
+{
+ return (size_t) (((const uint8_t *) p1) - ((const uint8_t *) p2));
+}
+
+
+# else /* ! FUNC_ATTR_NOSANITIZE_WORKS */
+#error User-poisoning cannot be used
+# endif /* ! FUNC_ATTR_NOSANITIZE_WORKS */
+#endif /* MHD_ASAN_POISON_ACTIVE */
+
+#ifdef mhd_USE_LARGE_ALLOCS
+/**
+ * Size of memory page
+ */
+static size_t MHD_sys_page_size_ = (size_t)
+# if defined(MHD_USE_PAGESIZE_MACRO_STATIC)
+ PAGESIZE;
+# elif defined(MHD_USE_PAGE_SIZE_MACRO_STATIC)
+ PAGE_SIZE;
+# else /* ! MHD_USE_PAGE_SIZE_MACRO_STATIC */
+ mhd_FALLBACK_PAGE_SIZE; /* Default fallback value */
+# endif /* ! MHD_USE_PAGE_SIZE_MACRO_STATIC */
+#endif /* mhd_USE_LARGE_ALLOCS */
+
+void
+mhd_init_mem_pools (void)
+{
+#ifdef mhd_USE_LARGE_ALLOCS
+#ifdef MHD_SC_PAGESIZE
+ long result;
+ result = sysconf (MHD_SC_PAGESIZE);
+ if (-1 != result)
+ MHD_sys_page_size_ = (size_t) result;
+ else
+ MHD_sys_page_size_ = (size_t) mhd_DEF_PAGE_SIZE;
+#elif defined(_WIN32)
+ SYSTEM_INFO si;
+ GetSystemInfo (&si);
+ MHD_sys_page_size_ = (size_t) si.dwPageSize;
+#else
+ MHD_sys_page_size_ = (size_t) mhd_DEF_PAGE_SIZE;
+#endif /* _WIN32 */
+ mhd_assert (0 == (MHD_sys_page_size_ % mhd_MEMPOOL_ALIGN_SIZE));
+#endif /* mhd_USE_LARGE_ALLOCS */
+ (void) 0;
+}
+
+
+/**
+ * Handle for a memory pool. Pools are not reentrant and must not be
+ * used by multiple threads.
+ */
+struct mhd_MemoryPool
+{
+
+ /**
+ * Pointer to the pool's memory
+ */
+ uint8_t *memory;
+
+ /**
+ * Size of the pool.
+ */
+ size_t size;
+
+ /**
+ * Offset of the first unallocated byte.
+ */
+ size_t pos;
+
+ /**
+ * Offset of the byte after the last unallocated byte.
+ */
+ size_t end;
+
+#ifdef mhd_USE_LARGE_ALLOCS
+ /**
+ * 'false' if pool was malloc'ed, 'true' if mmapped (VirtualAlloc'ed for W32).
+ */
+ bool is_large_alloc;
+#endif
+
+ /**
+ * Memory allocation zeroing mode
+ */
+ enum mhd_MemPoolZeroing zeroing;
+};
+
+
+MHD_INTERNAL mhd_FN_RET_UNALIASED mhd_FN_OBJ_CONSTRUCTOR(mhd_pool_destroy)
+struct mhd_MemoryPool *
+mhd_pool_create (size_t max,
+ enum mhd_MemPoolZeroing zeroing)
+{
+ struct mhd_MemoryPool *pool;
+ size_t alloc_size;
+
+ mhd_assert (max > 0);
+ alloc_size = 0;
+ pool = (struct mhd_MemoryPool *) malloc (sizeof (struct mhd_MemoryPool));
+ if (NULL == pool)
+ return NULL;
+ pool->zeroing = zeroing;
+#ifdef mhd_USE_LARGE_ALLOCS
+ pool->is_large_alloc = false;
+ if ( (max <= 32 * 1024) ||
+ (max < MHD_sys_page_size_ * 4 / 3) )
+ {
+ pool->memory = (uint8_t *) mhd_MAP_FAILED;
+ }
+ else
+ {
+ /* Round up allocation to page granularity. */
+ alloc_size = max + MHD_sys_page_size_ - 1;
+ alloc_size -= alloc_size % MHD_sys_page_size_;
+ pool->is_large_alloc = true;
+# if defined(mhd_MAP_ANONYMOUS)
+ pool->memory = (uint8_t *) mmap (NULL,
+ alloc_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS,
+ -1,
+ 0);
+# else /* ! mhd_MAP_ANONYMOUS */
+ pool->memory = (uint8_t *) VirtualAlloc (NULL,
+ alloc_size,
+ MEM_COMMIT | MEM_RESERVE,
+ PAGE_READWRITE);
+# endif /* ! mhd_MAP_ANONYMOUS */
+ }
+  if (mhd_MAP_FAILED != pool->memory)
+    pool->is_large_alloc = true;
+  else
+#endif /* mhd_USE_LARGE_ALLOCS */
+ if (! 0)
+ {
+ alloc_size = mhd_ROUND_TO_ALIGN (max);
+ if (MHD_MEMPOOL_ZEROING_NEVER == zeroing)
+ pool->memory = (uint8_t *) malloc (alloc_size);
+ else
+ pool->memory = (uint8_t *) mhd_calloc (1, alloc_size);
+ if (((uint8_t *) NULL) == pool->memory)
+ {
+ free (pool);
+ return NULL;
+ }
+ }
+ mhd_assert (0 == (((uintptr_t) pool->memory) % mhd_MEMPOOL_ALIGN_SIZE));
+ pool->pos = 0;
+ pool->end = alloc_size;
+ pool->size = alloc_size;
+ mhd_assert (0 < alloc_size);
+ mhd_POISON_MEMORY (pool->memory, pool->size);
+ return pool;
+}
+
+
+MHD_INTERNAL void
+mhd_pool_destroy (struct mhd_MemoryPool *restrict pool)
+{
+ if (NULL == pool)
+ return;
+
+ mhd_assert (pool->end >= pool->pos);
+ mhd_assert (pool->size >= pool->end - pool->pos);
+ mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
+ mhd_UNPOISON_MEMORY (pool->memory, pool->size);
+#ifdef mhd_USE_LARGE_ALLOCS
+ if (pool->is_large_alloc)
+ {
+# if defined(mhd_MAP_ANONYMOUS)
+ munmap (pool->memory,
+ pool->size);
+# else
+ VirtualFree (pool->memory,
+ 0,
+ MEM_RELEASE);
+# endif
+ }
+ else
+#endif /* mhd_USE_LARGE_ALLOCS*/
+ if (! 0)
+ free (pool->memory);
+
+ free (pool);
+}
+
+
+MHD_INTERNAL size_t
+mhd_pool_get_free (struct mhd_MemoryPool *restrict pool)
+{
+ mhd_assert (pool->end >= pool->pos);
+ mhd_assert (pool->size >= pool->end - pool->pos);
+ mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
+#ifdef MHD_ASAN_POISON_ACTIVE
+ if ((pool->end - pool->pos) <= mhd_RED_ZONE_SIZE)
+ return 0;
+#endif /* MHD_ASAN_POISON_ACTIVE */
+ return (pool->end - pool->pos) - mhd_RED_ZONE_SIZE;
+}
+
+
+MHD_INTERNAL mhd_FN_RET_UNALIASED mhd_FN_RET_SIZED(2)
+mhd_FN_RET_ALIGNED(mhd_MEMPOOL_ALIGN_SIZE) void *
+mhd_pool_allocate (struct mhd_MemoryPool *restrict pool,
+ size_t size,
+ bool from_end)
+{
+ void *ret;
+ size_t asize;
+
+ mhd_assert (pool->end >= pool->pos);
+ mhd_assert (pool->size >= pool->end - pool->pos);
+ mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
+ asize = mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
+ if ( (0 == asize) && (0 != size) )
+ return NULL; /* size too close to SIZE_MAX */
+ if (asize > pool->end - pool->pos)
+ return NULL;
+ if (from_end)
+ {
+ ret = &pool->memory[pool->end - asize];
+ pool->end -= asize;
+ }
+ else
+ {
+ ret = &pool->memory[pool->pos];
+ pool->pos += asize;
+ }
+ mhd_UNPOISON_MEMORY (ret, size);
+ return ret;
+}
+
+
+MHD_INTERNAL bool
+mhd_pool_is_resizable_inplace (struct mhd_MemoryPool *restrict pool,
+ void *restrict block,
+ size_t block_size)
+{
+ mhd_assert (pool->end >= pool->pos);
+ mhd_assert (pool->size >= pool->end - pool->pos);
+ mhd_assert (block != NULL || block_size == 0);
+ mhd_assert (pool->size >= block_size);
+ if (NULL != block)
+ {
+ const size_t block_offset = mp_ptr_diff_ (block, pool->memory);
+ mhd_assert (mp_ptr_le_ (pool->memory, block));
+ mhd_assert (pool->size >= block_offset);
+ mhd_assert (pool->size >= block_offset + block_size);
+ return (pool->pos ==
+ mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size));
+ }
+ return false; /* Unallocated blocks cannot be resized in-place */
+}
+
+
+MHD_INTERNAL mhd_FN_RET_UNALIASED mhd_FN_RET_SIZED(2)
+mhd_FN_RET_ALIGNED(mhd_MEMPOOL_ALIGN_SIZE) void *
+mhd_pool_try_alloc (struct mhd_MemoryPool *restrict pool,
+ size_t size,
+ size_t *restrict required_bytes)
+{
+ void *ret;
+ size_t asize;
+
+ mhd_assert (pool->end >= pool->pos);
+ mhd_assert (pool->size >= pool->end - pool->pos);
+ mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
+ asize = mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
+ if ( (0 == asize) && (0 != size) )
+ { /* size is too close to SIZE_MAX, very unlikely */
+ *required_bytes = SIZE_MAX;
+ return NULL;
+ }
+ if (asize > pool->end - pool->pos)
+ {
+ mhd_assert ((pool->end - pool->pos) == \
+ mhd_ROUND_TO_ALIGN (pool->end - pool->pos));
+ if (asize <= pool->end)
+ *required_bytes = asize - (pool->end - pool->pos);
+ else
+ *required_bytes = SIZE_MAX;
+ return NULL;
+ }
+ *required_bytes = 0;
+ ret = &pool->memory[pool->end - asize];
+ pool->end -= asize;
+ mhd_UNPOISON_MEMORY (ret, size);
+ return ret;
+}
+
+
+MHD_INTERNAL mhd_FN_RET_SIZED(4) mhd_FN_RET_ALIGNED(mhd_MEMPOOL_ALIGN_SIZE)
+void *
+mhd_pool_reallocate (struct mhd_MemoryPool *restrict pool,
+ void *restrict old,
+ size_t old_size,
+ size_t new_size)
+{
+ size_t asize;
+ uint8_t *new_blc;
+
+ mhd_assert (pool->end >= pool->pos);
+ mhd_assert (pool->size >= pool->end - pool->pos);
+ mhd_assert (old != NULL || old_size == 0);
+ mhd_assert (pool->size >= old_size);
+ mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
+#if defined(MHD_ASAN_POISON_ACTIVE) && defined(HAVE___ASAN_REGION_IS_POISONED)
+ mhd_assert (NULL == __asan_region_is_poisoned (old, old_size));
+#endif /* MHD_ASAN_POISON_ACTIVE && HAVE___ASAN_REGION_IS_POISONED */
+
+ if (NULL != old)
+ { /* Have previously allocated data */
+ const size_t old_offset = mp_ptr_diff_ (old, pool->memory);
+ const bool shrinking = (old_size > new_size);
+
+ mhd_assert (mp_ptr_le_ (pool->memory, old));
+ /* (pool->memory + pool->size >= (uint8_t*) old + old_size) */
+ mhd_assert ((pool->size - mhd_RED_ZONE_SIZE) >= (old_offset + old_size));
+ /* Blocks "from the end" must not be reallocated */
+ /* (old_size == 0 || pool->memory + pool->pos > (uint8_t*) old) */
+ mhd_assert ((old_size == 0) || \
+ (pool->pos > old_offset));
+ mhd_assert ((old_size == 0) || \
+ ((pool->end - mhd_RED_ZONE_SIZE) >= (old_offset + old_size)));
+ /* Try resizing in-place */
+ if (shrinking)
+ { /* Shrinking in-place, zero-out freed part */
+ if (MHD_MEMPOOL_ZEROING_ON_RESET < pool->zeroing)
+ memset ((uint8_t *) old + new_size, 0, old_size - new_size);
+ mhd_POISON_MEMORY ((uint8_t *) old + new_size, old_size - new_size);
+ }
+ if (pool->pos ==
+ mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + old_size))
+ { /* "old" block is the last allocated block */
+ const size_t new_apos =
+ mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + new_size);
+ if (! shrinking)
+ { /* Grow in-place, check for enough space. */
+ if ( (new_apos > pool->end) ||
+ (new_apos < pool->pos) ) /* Value wrap */
+ return NULL; /* No space */
+ }
+ /* Resized in-place */
+ pool->pos = new_apos;
+ mhd_UNPOISON_MEMORY (old, new_size);
+ return old;
+ }
+ if (shrinking)
+ return old; /* Resized in-place, freed part remains allocated */
+ }
+ /* Need to allocate new block */
+ asize = mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
+ if ( ( (0 == asize) &&
+ (0 != new_size) ) || /* Value wrap, too large new_size. */
+ (asize > pool->end - pool->pos) ) /* Not enough space */
+ return NULL;
+
+ new_blc = pool->memory + pool->pos;
+ pool->pos += asize;
+
+ mhd_UNPOISON_MEMORY (new_blc, new_size);
+ if (0 != old_size)
+ {
+ /* Move data to new block, old block remains allocated */
+ memcpy (new_blc, old, old_size);
+ /* Zero-out old block */
+ if (MHD_MEMPOOL_ZEROING_ON_RESET < pool->zeroing)
+ memset (old, 0, old_size);
+ mhd_POISON_MEMORY (old, old_size);
+ }
+ return new_blc;
+}
+
+
+MHD_INTERNAL void
+mhd_pool_deallocate (struct mhd_MemoryPool *restrict pool,
+ void *restrict block,
+ size_t block_size)
+{
+ mhd_assert (pool->end >= pool->pos);
+ mhd_assert (pool->size >= pool->end - pool->pos);
+ mhd_assert (block != NULL || block_size == 0);
+ mhd_assert (pool->size >= block_size);
+ mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
+
+ if (NULL != block)
+ { /* Have previously allocated data */
+ const size_t block_offset = mp_ptr_diff_ (block, pool->memory);
+ mhd_assert (mp_ptr_le_ (pool->memory, block));
+ mhd_assert (block_offset <= pool->size);
+ mhd_assert ((block_offset != pool->pos) || (block_size == 0));
+ /* Zero-out deallocated region */
+ if (0 != block_size)
+ {
+ if (MHD_MEMPOOL_ZEROING_ON_RESET < pool->zeroing)
+ memset (block, 0, block_size);
+ mhd_POISON_MEMORY (block, block_size);
+ }
+#if ! defined(MHD_FAVOR_SMALL_CODE) && ! defined(MHD_ASAN_POISON_ACTIVE)
+ else
+ return; /* Zero size, no need to do anything */
+#endif /* ! MHD_FAVOR_SMALL_CODE && ! MHD_ASAN_POISON_ACTIVE */
+ if (block_offset <= pool->pos)
+ {
+ /* "Normal" block, not allocated "from the end". */
+ const size_t alg_end =
+ mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size);
+ mhd_assert (alg_end <= pool->pos);
+ if (alg_end == pool->pos)
+ {
+ /* The last allocated block, return deallocated block to the pool */
+ size_t alg_start = mhd_ROUND_TO_ALIGN (block_offset);
+ mhd_assert (alg_start >= block_offset);
+#if defined(MHD_ASAN_POISON_ACTIVE)
+ if (alg_start != block_offset)
+ {
+ mhd_POISON_MEMORY (pool->memory + block_offset, \
+ alg_start - block_offset);
+ }
+ else if (0 != alg_start)
+ {
+ bool need_red_zone_before;
+ mhd_assert (mhd_RED_ZONE_SIZE <= alg_start);
+#if defined(HAVE___ASAN_REGION_IS_POISONED)
+ need_red_zone_before =
+ (NULL == __asan_region_is_poisoned (pool->memory
+ + alg_start
+ - mhd_RED_ZONE_SIZE,
+ mhd_RED_ZONE_SIZE));
+#elif defined(HAVE___ASAN_ADDRESS_IS_POISONED)
+ need_red_zone_before =
+ (0 == __asan_address_is_poisoned (pool->memory + alg_start - 1));
+#else /* ! HAVE___ASAN_ADDRESS_IS_POISONED */
+ need_red_zone_before = true; /* Unknown, assume new red zone needed */
+#endif /* ! HAVE___ASAN_ADDRESS_IS_POISONED */
+ if (need_red_zone_before)
+ {
+ mhd_POISON_MEMORY (pool->memory + alg_start, mhd_RED_ZONE_SIZE);
+ alg_start += mhd_RED_ZONE_SIZE;
+ }
+ }
+#endif /* MHD_ASAN_POISON_ACTIVE */
+ mhd_assert (alg_start <= pool->pos);
+ mhd_assert (alg_start == mhd_ROUND_TO_ALIGN (alg_start));
+ pool->pos = alg_start;
+ }
+ }
+ else
+ {
+ /* Allocated "from the end" block. */
+ /* The size and the pointers of such block should not be manipulated by
+ MHD code (block split is disallowed). */
+ mhd_assert (block_offset >= pool->end);
+ mhd_assert (mhd_ROUND_TO_ALIGN (block_offset) == block_offset);
+ if (block_offset == pool->end)
+ {
+ /* The last allocated block, return deallocated block to the pool */
+ const size_t alg_end =
+ mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size);
+ pool->end = alg_end;
+ }
+ }
+ }
+}
+
+
+MHD_INTERNAL mhd_FN_RET_SIZED(4) mhd_FN_RET_ALIGNED(mhd_MEMPOOL_ALIGN_SIZE)
+void *
+mhd_pool_reset (struct mhd_MemoryPool *restrict pool,
+ void *restrict keep,
+ size_t copy_bytes,
+ size_t new_size)
+{
+ mhd_assert (pool->end >= pool->pos);
+ mhd_assert (pool->size >= pool->end - pool->pos);
+ mhd_assert (copy_bytes <= new_size);
+ mhd_assert (copy_bytes <= pool->size);
+ mhd_assert (keep != NULL || copy_bytes == 0);
+ mhd_assert (keep == NULL || mp_ptr_le_ (pool->memory, keep));
+ /* (keep == NULL || pool->memory + pool->size >= (uint8_t*) keep + copy_bytes) */
+ mhd_assert ((keep == NULL) || \
+ (pool->size >= mp_ptr_diff_ (keep, pool->memory) + copy_bytes));
+#if defined(MHD_ASAN_POISON_ACTIVE) && defined(HAVE___ASAN_REGION_IS_POISONED)
+ mhd_assert (NULL == __asan_region_is_poisoned (keep, copy_bytes));
+#endif /* MHD_ASAN_POISON_ACTIVE && HAVE___ASAN_REGION_IS_POISONED */
+ mhd_UNPOISON_MEMORY (pool->memory, new_size);
+ if ( (NULL != keep) &&
+ (keep != pool->memory) )
+ {
+ if (0 != copy_bytes)
+ memmove (pool->memory,
+ keep,
+ copy_bytes);
+ }
+ if ((MHD_MEMPOOL_ZEROING_NEVER != pool->zeroing) &&
+ (pool->size > copy_bytes))
+ {
+ size_t to_zero; /** Size of area to zero-out */
+
+ to_zero = pool->size - copy_bytes;
+ mhd_UNPOISON_MEMORY (pool->memory + copy_bytes, to_zero);
+#if defined(mhd_USE_LARGE_ALLOCS) && defined(_WIN32)
+ if (pool->is_large_alloc)
+ {
+ size_t to_recommit; /** Size of decommitted and re-committed area. */
+ uint8_t *recommit_addr;
+ /* Round down to page size */
+ to_recommit = to_zero - to_zero % MHD_sys_page_size_;
+ recommit_addr = pool->memory + pool->size - to_recommit;
+
+ /* De-committing and re-committing again clear memory and make
+ * pages free / available for other needs until accessed. */
+ if (VirtualFree (recommit_addr,
+ to_recommit,
+ MEM_DECOMMIT))
+ {
+ to_zero -= to_recommit;
+
+ if (recommit_addr != VirtualAlloc (recommit_addr,
+ to_recommit,
+ MEM_COMMIT,
+ PAGE_READWRITE))
+ abort (); /* Serious error, must never happen */
+ }
+ }
+#endif /* mhd_USE_LARGE_ALLOCS && _WIN32 */
+ memset (&pool->memory[copy_bytes],
+ 0,
+ to_zero);
+ }
+ pool->pos = mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
+ pool->end = pool->size;
+ mhd_POISON_MEMORY (((uint8_t *) pool->memory) + new_size, \
+ pool->size - new_size);
+ return pool->memory;
+}
+
+
+/* end of mempool_funcs.c */
diff --git a/src/mhd2/mempool_funcs.h b/src/mhd2/mempool_funcs.h
@@ -0,0 +1,208 @@
+/*
+ This file is part of libmicrohttpd
+ Copyright (C) 2007--2024 Daniel Pittman and Christian Grothoff
+ Copyright (C) 2016--2025 Evgeny Grin (Karlson2k)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+/**
+ * @file src/mhd2/mempool_funcs.h
+ * @brief memory pool; mostly used for efficient (de)allocation
+ * for each connection and bounding memory use for each
+ * request
+ * @author Christian Grothoff
+ * @author Karlson2k (Evgeny Grin)
+ */
+
+#ifndef MHD_MEMPOOL_FUNCS_H
+#define MHD_MEMPOOL_FUNCS_H 1
+
+#include "mhd_sys_options.h"
+#include "sys_base_types.h"
+#include "sys_bool_type.h"
+#include "mempool_types.h"
+
+/**
+ * Alignment size used by memory pool function.
+ * This is 2x pointer size (similar to GNU libc).
+ */
+#define mhd_MEMPOOL_ALIGN_SIZE (2 * sizeof(void*))
+
+/**
+ * Perform one-time initialisation of the internal values required for
+ * memory pools functions
+ */
+void
+mhd_init_mem_pools (void);
+
+
+/**
+ * Destroy a memory pool.
+ *
+ * @param pool memory pool to destroy
+ */
+MHD_INTERNAL void
+mhd_pool_destroy (struct mhd_MemoryPool *restrict pool);
+
+
+/**
+ * Create a memory pool.
+ *
+ * @param max maximum size of the pool
+ * @param zeroing the requested zeroing mode for allocated and
+ *                deallocated memory
+ * @return pointer to the new object on success,
+ * NULL on error
+ */
+MHD_INTERNAL struct mhd_MemoryPool *
+mhd_pool_create (size_t max,
+ enum mhd_MemPoolZeroing zeroing)
+mhd_FN_RET_UNALIASED mhd_FN_OBJ_CONSTRUCTOR(mhd_pool_destroy);
+
+
+/**
+ * Allocate size bytes from the pool.
+ *
+ * @param pool memory pool to use for the operation
+ * @param size number of bytes to allocate
+ * @param from_end allocate from end of pool (set to 'true');
+ * use this for small, persistent allocations that
+ * will not be reallocated until pool reset
+ * @return pointer to the new allocated memory region on success,
+ * NULL if the pool does not have enough free memory.
+ */
+MHD_INTERNAL void *
+mhd_pool_allocate (struct mhd_MemoryPool *restrict pool,
+ size_t size,
+ bool from_end)
+mhd_FN_RET_UNALIASED mhd_FN_RET_SIZED(2)
+mhd_FN_RET_ALIGNED(mhd_MEMPOOL_ALIGN_SIZE);
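+
+/* A minimal usage sketch (assuming a valid pool):
+ *   void *buf = mhd_pool_allocate (pool, 128, false); // relocatable area
+ *   void *fix = mhd_pool_allocate (pool, 32, true);   // persistent, "from the end"
+ *   if ((NULL == buf) || (NULL == fix))
+ *     ... the pool does not have enough free memory ...
+ */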
+
+/**
+ * Check whether the allocated block is resizable in-place.
+ * If the block is not resizable in-place, it can still be shrunk, but the
+ * freed memory will not be reused until the pool is reset.
+ * @param pool the memory pool to use
+ * @param block the pointer to the allocated block to check
+ * @param block_size the size of the allocated @a block
+ * @return true if block can be resized in-place in the optimal way,
+ * false otherwise
+ */
+MHD_INTERNAL bool
+mhd_pool_is_resizable_inplace (struct mhd_MemoryPool *restrict pool,
+ void *restrict block,
+ size_t block_size);
+
+/**
+ * Try to allocate @a size bytes memory area from the @a pool.
+ *
+ * If the allocation fails, @a required_bytes is updated with the size that
+ * must be freed in the relocatable area of the @a pool to allocate the
+ * requested number of bytes.
+ * The allocated memory area is always non-relocatable ("from the end").
+ *
+ * @param pool memory pool to use for the operation
+ * @param size the size of memory in bytes to allocate
+ * @param[out] required_bytes the pointer to variable to be updated with
+ * the size of the required additional free
+ * memory area, set to 0 if function succeeds.
+ * Cannot be NULL.
+ * @return the pointer to the allocated memory area on success,
+ *         NULL if the pool does not have enough space; @a required_bytes
+ *         is updated with the amount of space to be freed in the
+ *         relocatable area, or set to SIZE_MAX if the requested size is
+ *         too large for the pool.
+ */
+MHD_INTERNAL void *
+mhd_pool_try_alloc (struct mhd_MemoryPool *restrict pool,
+ size_t size,
+ size_t *restrict required_bytes)
+mhd_FN_RET_UNALIASED mhd_FN_RET_SIZED(2)
+mhd_FN_RET_ALIGNED(mhd_MEMPOOL_ALIGN_SIZE);
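+
+/* For example, if the relocatable area has 64 bytes free and 96 bytes are
+ * requested (96 is already 16-byte aligned; no ASAN red zone assumed), the
+ * call returns NULL and sets *required_bytes to 32: the aligned request
+ * size minus the currently free bytes. */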
+
+
+/**
+ * Reallocate a block of memory obtained from the pool.
+ * This is particularly efficient when growing or
+ * shrinking the block that was last (re)allocated.
+ * If the given block is not the most recently
+ * (re)allocated block, the memory of the previous
+ * allocation may not be released until the pool is
+ * destroyed or reset.
+ *
+ * @param pool memory pool to use for the operation
+ * @param old the existing block
+ * @param old_size the size of the existing block
+ * @param new_size the new size of the block
+ * @return new address of the block, or
+ * NULL if the pool cannot support @a new_size
+ * bytes (old continues to be valid for @a old_size)
+ */
+MHD_INTERNAL void *
+mhd_pool_reallocate (struct mhd_MemoryPool *restrict pool,
+ void *restrict old,
+ size_t old_size,
+ size_t new_size)
+mhd_FN_RET_SIZED(4) mhd_FN_RET_ALIGNED(mhd_MEMPOOL_ALIGN_SIZE);
+
+
+/**
+ * Check how much memory is left in the @a pool
+ *
+ * @param pool pool to check
+ * @return number of bytes still available in @a pool
+ */
+MHD_INTERNAL size_t
+mhd_pool_get_free (struct mhd_MemoryPool *restrict pool);
+
+
+/**
+ * Deallocate a block of memory obtained from the pool.
+ *
+ * If the given block is not the most recently
+ * (re)allocated block, the memory of this block
+ * may not be released until the pool is
+ * destroyed or reset.
+ *
+ * @param pool memory pool to use for the operation
+ * @param block the allocated block; NULL is tolerated
+ * @param block_size the size of the allocated block
+ */
+MHD_INTERNAL void
+mhd_pool_deallocate (struct mhd_MemoryPool *restrict pool,
+ void *restrict block,
+ size_t block_size);
+
+
+/**
+ * Clear all entries from the memory pool except
+ * for @a keep of the given @a copy_bytes. The pointer
+ * returned should be a buffer of @a new_size where
+ * the first @a copy_bytes are from @a keep.
+ *
+ * @param pool memory pool to use for the operation
+ * @param keep pointer to the entry to keep (may be NULL)
+ * @param copy_bytes how many bytes need to be kept at this address
+ * @param new_size the size of the returned allocation
+ *                 (must be greater than or equal to @a copy_bytes)
+ * @return the new address of @a keep (if it had to change)
+ */
+MHD_INTERNAL void *
+mhd_pool_reset (struct mhd_MemoryPool *restrict pool,
+ void *restrict keep,
+ size_t copy_bytes,
+ size_t new_size)
+mhd_FN_RET_SIZED(4) mhd_FN_RET_ALIGNED(mhd_MEMPOOL_ALIGN_SIZE);
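+
+/* Sketch of the typical use (assuming a valid pool): after a request has
+ * been processed, keep only the leftover read data for the next request:
+ *   read_buf = mhd_pool_reset (pool, leftover, leftover_size, buf_size);
+ * The kept bytes are moved to the start of the pool; depending on the
+ * pool's zeroing mode, the rest of the pool memory is wiped. */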
+
+#endif /* ! MHD_MEMPOOL_FUNCS_H */
diff --git a/src/mhd2/mempool_types.h b/src/mhd2/mempool_types.h
@@ -0,0 +1,64 @@
+/*
+ This file is part of GNU libmicrohttpd
+ Copyright (C) 2025 Evgeny Grin (Karlson2k)
+
+ GNU libmicrohttpd is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ GNU libmicrohttpd is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see
+ <https://www.gnu.org/licenses/>.
+
+*/
+
+/**
+ * @file src/mhd2/mempool_types.h
+ * @brief Special types for use with memory pool functions
+ * @author Karlson2k (Evgeny Grin)
+ */
+
+#ifndef MHD_MEMPOOL_TYPES_H
+#define MHD_MEMPOOL_TYPES_H 1
+
+#include "mhd_sys_options.h"
+
+/**
+ * Opaque handle for a memory pool.
+ * Pools are not reentrant and must not be used
+ * by multiple threads.
+ */
+struct mhd_MemoryPool;
+
+/**
+ * Controls zeroing of allocated and deallocated memory
+ */
+enum MHD_FIXED_ENUM_ mhd_MemPoolZeroing
+{
+ /**
+ * Never zero the memory.
+ * Allocated memory may contain garbage.
+ * Deallocated regions are not wiped.
+ */
+ MHD_MEMPOOL_ZEROING_NEVER = 0
+ ,
+ /**
+ * Zero memory on pool initialisation and on every reset operation.
+ * Deallocated and re-allocated memory areas are not wiped.
+ */
+ MHD_MEMPOOL_ZEROING_ON_RESET
+ ,
+ /**
+ * Zero memory on pool creation, on every reset and
+ * on every deallocation/re-allocation.
+ */
+ MHD_MEMPOOL_ZEROING_ALWAYS
+};
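+
+/* Note: the numeric order of these values is significant; the pool code
+ * selects behaviour with ordered comparisons, e.g.
+ * "MHD_MEMPOOL_ZEROING_ON_RESET < pool->zeroing" enables wiping of
+ * deallocated areas only in the "ALWAYS" mode. */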
+
+#endif /* ! MHD_MEMPOOL_TYPES_H */
diff --git a/src/mhd2/mhd_daemon.h b/src/mhd2/mhd_daemon.h
@@ -58,6 +58,8 @@
# include <sys/epoll.h>
#endif
+#include "mempool_types.h"
+
#include "mhd_public_api.h"
struct DaemonOptions; /* Forward declaration */
@@ -897,6 +899,11 @@ struct mhd_DaemonConnectionsSettings
* Connection's memory pool size
*/
size_t mem_pool_size;
+
+ /**
+ * Memory pool zeroing mode
+ */
+ enum mhd_MemPoolZeroing mem_pool_zeroing;
};
#ifdef MHD_SUPPORT_UPGRADE
diff --git a/src/mhd2/mhd_mempool.c b/src/mhd2/mhd_mempool.c
@@ -1,813 +0,0 @@
-/*
- This file is part of libmicrohttpd
- Copyright (C) 2007--2024 Daniel Pittman and Christian Grothoff
- Copyright (C) 2014--2024 Evgeny Grin (Karlson2k)
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this library; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-*/
-
-/**
- * @file src/mhd2/mhd_mempool.h
- * @brief memory pool
- * @author Christian Grothoff
- * @author Karlson2k (Evgeny Grin)
- * TODO:
- * + Update code style
- * + Detect mmap() in configure (it is purely optional!)
- */
-#include "mhd_sys_options.h"
-
-#include "mhd_mempool.h"
-#ifdef HAVE_STDLIB_H
-# include <stdlib.h>
-#endif /* HAVE_STDLIB_H */
-#include <string.h>
-#include "mhd_assert.h"
-#ifdef HAVE_SYS_MMAN_H
-# include <sys/mman.h>
-#endif
-#ifdef _WIN32
-# include <windows.h>
-#endif
-#ifdef HAVE_SYSCONF
-# include <unistd.h>
-# if defined(_SC_PAGE_SIZE)
-# define MHD_SC_PAGESIZE _SC_PAGE_SIZE
-# elif defined(_SC_PAGESIZE)
-# define MHD_SC_PAGESIZE _SC_PAGESIZE
-# endif /* _SC_PAGESIZE */
-#endif /* HAVE_SYSCONF */
-
-#if defined(MHD_USE_PAGESIZE_MACRO) || defined(MHD_USE_PAGE_SIZE_MACRO)
-# ifndef HAVE_SYSCONF /* Avoid duplicate include */
-# include <unistd.h>
-# endif /* HAVE_SYSCONF */
-# ifdef HAVE_LIMITS_H
-# include <limits.h>
-# endif
-# ifdef HAVE_SYS_PARAM_H
-# include <sys/param.h>
-# endif /* HAVE_SYS_PARAM_H */
-#endif /* MHD_USE_PAGESIZE_MACRO || MHD_USE_PAGE_SIZE_MACRO */
-
-#include "mhd_limits.h"
-
-#ifndef mhd_FALLBACK_PAGE_SIZE
-/**
- * Fallback value of page size
- */
-# define mhd_FALLBACK_PAGE_SIZE (4096)
-#endif
-
-#if defined(MHD_USE_PAGESIZE_MACRO)
-# define mhd_DEF_PAGE_SIZE PAGESIZE
-#elif defined(MHD_USE_PAGE_SIZE_MACRO)
-# define mhd_DEF_PAGE_SIZE PAGE_SIZE
-#else /* ! PAGESIZE */
-# define mhd_DEF_PAGE_SIZE mhd_FALLBACK_PAGE_SIZE
-#endif /* ! PAGESIZE */
-
-
-#ifdef MHD_ASAN_POISON_ACTIVE
-#include <sanitizer/asan_interface.h>
-#endif /* MHD_ASAN_POISON_ACTIVE */
-
-/* define MAP_ANONYMOUS for Mac OS X */
-#if defined(MAP_ANON) && ! defined(MAP_ANONYMOUS)
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-#if defined(_WIN32)
-#define MAP_FAILED NULL
-#elif ! defined(MAP_FAILED)
-#define MAP_FAILED ((void*) -1)
-#endif
-
-/**
- * Align to 2x word size (as GNU libc does).
- */
-#define ALIGN_SIZE (2 * sizeof(void*))
-
-/**
- * Round up 'n' to a multiple of ALIGN_SIZE.
- */
-#define mhd_ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) \
- / (ALIGN_SIZE) *(ALIGN_SIZE))
-
-
-#ifndef MHD_ASAN_POISON_ACTIVE
-# define mhd_NOSANITIZE_PTRS /**/
-# define mhd_RED_ZONE_SIZE (0)
-# define mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE(n) mhd_ROUND_TO_ALIGN (n)
-# define mhd_POISON_MEMORY(pointer, size) (void) 0
-# define mhd_UNPOISON_MEMORY(pointer, size) (void) 0
-/**
- * Boolean 'true' if the first pointer is less or equal the second pointer
- */
-# define mp_ptr_le_(p1,p2) \
- (((const uint8_t*) (p1)) <= ((const uint8_t*) (p2)))
-/**
- * The difference in bytes between positions of the first and
- * the second pointers
- */
-# define mp_ptr_diff_(p1,p2) \
- ((size_t) (((const uint8_t*) (p1)) - ((const uint8_t*) (p2))))
-#else /* MHD_ASAN_POISON_ACTIVE */
-# define mhd_RED_ZONE_SIZE (ALIGN_SIZE)
-# define mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE(n) \
- (mhd_ROUND_TO_ALIGN (n) + mhd_RED_ZONE_SIZE)
-# define mhd_POISON_MEMORY(pointer, size) \
- ASAN_POISON_MEMORY_REGION ((pointer), (size))
-# define mhd_UNPOISON_MEMORY(pointer, size) \
- ASAN_UNPOISON_MEMORY_REGION ((pointer), (size))
-# if defined(FUNC_PTRCOMPARE_CAST_WORKAROUND_WORKS)
-/**
- * Boolean 'true' if the first pointer is less or equal the second pointer
- */
-# define mp_ptr_le_(p1,p2) \
- (((uintptr_t) ((const void*) (p1))) <= \
- ((uintptr_t) ((const void*) (p2))))
-/**
- * The difference in bytes between positions of the first and
- * the second pointers
- */
-# define mp_ptr_diff_(p1,p2) \
- ((size_t) (((uintptr_t) ((const uint8_t*) (p1))) - \
- ((uintptr_t) ((const uint8_t*) (p2)))))
-#elif defined(FUNC_ATTR_PTRCOMPARE_WORKS) && \
- defined(FUNC_ATTR_PTRSUBTRACT_WORKS)
-# ifndef NDEBUG
-/**
- * Boolean 'true' if the first pointer is less or equal the second pointer
- */
-__attribute__((no_sanitize ("pointer-compare"))) static bool
-mp_ptr_le_ (const void *p1, const void *p2)
-{
- return (((const uint8_t *) p1) <= ((const uint8_t *) p2));
-}
-
-
-# endif /* _DEBUG */
-
-
-/**
- * The difference in bytes between positions of the first and
- * the second pointers
- */
-__attribute__((no_sanitize ("pointer-subtract"))) static size_t
-mp_ptr_diff_ (const void *p1, const void *p2)
-{
- return (size_t) (((const uint8_t *) p1) - ((const uint8_t *) p2));
-}
-
-
-# elif defined(FUNC_ATTR_NOSANITIZE_WORKS)
-# ifndef NDEBUG
-/**
- * Boolean 'true' if the first pointer is less or equal the second pointer
- */
-__attribute__((no_sanitize ("address"))) static bool
-mp_ptr_le_ (const void *p1, const void *p2)
-{
- return (((const uint8_t *) p1) <= ((const uint8_t *) p2));
-}
-
-
- #endif /* _DEBUG */
-
-/**
- * The difference in bytes between positions of the first and
- * the second pointers
- */
-__attribute__((no_sanitize ("address"))) static size_t
-mp_ptr_diff_ (const void *p1, const void *p2)
-{
- return (size_t) (((const uint8_t *) p1) - ((const uint8_t *) p2));
-}
-
-
-# else /* ! FUNC_ATTR_NOSANITIZE_WORKS */
-#error User-poisoning cannot be used
-# endif /* ! FUNC_ATTR_NOSANITIZE_WORKS */
-#endif /* MHD_ASAN_POISON_ACTIVE */
-
-/**
- * Size of memory page
- */
-static size_t MHD_sys_page_size_ = (size_t)
-#if defined(MHD_USE_PAGESIZE_MACRO_STATIC)
- PAGESIZE;
-#elif defined(MHD_USE_PAGE_SIZE_MACRO_STATIC)
- PAGE_SIZE;
-#else /* ! MHD_USE_PAGE_SIZE_MACRO_STATIC */
- mhd_FALLBACK_PAGE_SIZE; /* Default fallback value */
-#endif /* ! MHD_USE_PAGE_SIZE_MACRO_STATIC */
-
-/**
- * Initialise values for memory pools
- */
-void
-mhd_init_mem_pools (void)
-{
-#ifdef MHD_SC_PAGESIZE
- long result;
- result = sysconf (MHD_SC_PAGESIZE);
- if (-1 != result)
- MHD_sys_page_size_ = (size_t) result;
- else
- MHD_sys_page_size_ = (size_t) mhd_DEF_PAGE_SIZE;
-#elif defined(_WIN32)
- SYSTEM_INFO si;
- GetSystemInfo (&si);
- MHD_sys_page_size_ = (size_t) si.dwPageSize;
-#else
- MHD_sys_page_size_ = (size_t) mhd_DEF_PAGE_SIZE;
-#endif /* _WIN32 */
- mhd_assert (0 == (MHD_sys_page_size_ % ALIGN_SIZE));
-}
-
-
-/**
- * Handle for a memory pool. Pools are not reentrant and must not be
- * used by multiple threads.
- */
-struct mhd_MemoryPool
-{
-
- /**
- * Pointer to the pool's memory
- */
- uint8_t *memory;
-
- /**
- * Size of the pool.
- */
- size_t size;
-
- /**
- * Offset of the first unallocated byte.
- */
- size_t pos;
-
- /**
- * Offset of the byte after the last unallocated byte.
- */
- size_t end;
-
- /**
- * 'false' if pool was malloc'ed, 'true' if mmapped (VirtualAlloc'ed for W32).
- */
- bool is_mmap;
-
- // TODO: implement *optional* zeroing on reset on reallocs
-};
-
-
-/**
- * Create a memory pool.
- *
- * @param max maximum size of the pool
- * @return NULL on error
- */
-MHD_INTERNAL struct mhd_MemoryPool *
-mdh_pool_create (size_t max)
-{
- struct mhd_MemoryPool *pool;
- size_t alloc_size;
-
- mhd_assert (max > 0);
- alloc_size = 0;
- pool = (struct mhd_MemoryPool *) malloc (sizeof (struct mhd_MemoryPool));
- if (NULL == pool)
- return NULL;
-#if defined(MAP_ANONYMOUS) || defined(_WIN32)
- if ( (max <= 32 * 1024) ||
- (max < MHD_sys_page_size_ * 4 / 3) )
- {
- pool->memory = (uint8_t *) MAP_FAILED;
- }
- else
- {
- /* Round up allocation to page granularity. */
- alloc_size = max + MHD_sys_page_size_ - 1;
- alloc_size -= alloc_size % MHD_sys_page_size_;
-#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
- pool->memory = (uint8_t *) mmap (NULL,
- alloc_size,
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS,
- -1,
- 0);
-#elif defined(_WIN32)
- pool->memory = (uint8_t *) VirtualAlloc (NULL,
- alloc_size,
- MEM_COMMIT | MEM_RESERVE,
- PAGE_READWRITE);
-#endif /* _WIN32 */
- }
-#else /* ! _WIN32 && ! MAP_ANONYMOUS */
- pool->memory = (uint8_t *) MAP_FAILED;
-#endif /* ! _WIN32 && ! MAP_ANONYMOUS */
- if (MAP_FAILED == pool->memory)
- {
- alloc_size = mhd_ROUND_TO_ALIGN (max);
- pool->memory = (uint8_t *) malloc (alloc_size);
- if (((uint8_t *) NULL) == pool->memory)
- {
- free (pool);
- return NULL;
- }
- pool->is_mmap = false;
- }
-#if defined(MAP_ANONYMOUS) || defined(_WIN32)
- else
- {
- pool->is_mmap = true;
- }
-#endif /* _WIN32 || MAP_ANONYMOUS */
- mhd_assert (0 == (((uintptr_t) pool->memory) % ALIGN_SIZE));
- pool->pos = 0;
- pool->end = alloc_size;
- pool->size = alloc_size;
- mhd_assert (0 < alloc_size);
- mhd_POISON_MEMORY (pool->memory, pool->size);
- return pool;
-}
-
-
-/**
- * Destroy a memory pool.
- *
- * @param pool memory pool to destroy
- */
-MHD_INTERNAL void
-mhd_pool_destroy (struct mhd_MemoryPool *restrict pool)
-{
- if (NULL == pool)
- return;
-
- mhd_assert (pool->end >= pool->pos);
- mhd_assert (pool->size >= pool->end - pool->pos);
- mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
- mhd_UNPOISON_MEMORY (pool->memory, pool->size);
- if (! pool->is_mmap)
- free (pool->memory);
- else
-#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
- munmap (pool->memory,
- pool->size);
-#elif defined(_WIN32)
- VirtualFree (pool->memory,
- 0,
- MEM_RELEASE);
-#else
- abort ();
-#endif
- free (pool);
-}
-
-
-/**
- * Check how much memory is left in the @a pool
- *
- * @param pool pool to check
- * @return number of bytes still available in @a pool
- */
-MHD_INTERNAL size_t
-mhd_pool_get_free (struct mhd_MemoryPool *restrict pool)
-{
- mhd_assert (pool->end >= pool->pos);
- mhd_assert (pool->size >= pool->end - pool->pos);
- mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
-#ifdef MHD_ASAN_POISON_ACTIVE
- if ((pool->end - pool->pos) <= mhd_RED_ZONE_SIZE)
- return 0;
-#endif /* MHD_ASAN_POISON_ACTIVE */
- return (pool->end - pool->pos) - mhd_RED_ZONE_SIZE;
-}
-
-
-/**
- * Allocate size bytes from the pool.
- *
- * @param pool memory pool to use for the operation
- * @param size number of bytes to allocate
- * @param from_end allocate from end of pool (set to 'true');
- * use this for small, persistent allocations that
- * will never be reallocated
- * @return NULL if the pool cannot support size more
- * bytes
- */
-MHD_INTERNAL void *
-mhd_pool_allocate (struct mhd_MemoryPool *restrict pool,
- size_t size,
- bool from_end)
-{
- void *ret;
- size_t asize;
-
- mhd_assert (pool->end >= pool->pos);
- mhd_assert (pool->size >= pool->end - pool->pos);
- mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
- asize = mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
- if ( (0 == asize) && (0 != size) )
- return NULL; /* size too close to SIZE_MAX */
- if (asize > pool->end - pool->pos)
- return NULL;
- if (from_end)
- {
- ret = &pool->memory[pool->end - asize];
- pool->end -= asize;
- }
- else
- {
- ret = &pool->memory[pool->pos];
- pool->pos += asize;
- }
- mhd_UNPOISON_MEMORY (ret, size);
- return ret;
-}
-
-
-/**
- * Checks whether allocated block is re-sizable in-place.
- * If block is not re-sizable in-place, it still could be shrunk, but freed
- * memory will not be re-used until reset of the pool.
- * @param pool the memory pool to use
- * @param block the pointer to the allocated block to check
- * @param block_size the size of the allocated @a block
- * @return true if block can be resized in-place in the optimal way,
- * false otherwise
- */
-MHD_INTERNAL bool
-mhd_pool_is_resizable_inplace (struct mhd_MemoryPool *restrict pool,
- void *restrict block,
- size_t block_size)
-{
- mhd_assert (pool->end >= pool->pos);
- mhd_assert (pool->size >= pool->end - pool->pos);
- mhd_assert (block != NULL || block_size == 0);
- mhd_assert (pool->size >= block_size);
- if (NULL != block)
- {
- const size_t block_offset = mp_ptr_diff_ (block, pool->memory);
- mhd_assert (mp_ptr_le_ (pool->memory, block));
- mhd_assert (pool->size >= block_offset);
- mhd_assert (pool->size >= block_offset + block_size);
- return (pool->pos ==
- mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size));
- }
- return false; /* Unallocated blocks cannot be resized in-place */
-}
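
A sketch of the intended use (the "avoid copies" policy is hypothetical):
grow a block only when the pool reports that no data would be moved:

static void *
grow_only_if_inplace (struct mhd_MemoryPool *pool,
                      void *block,
                      size_t old_size,
                      size_t new_size)
{
  /* Only the most recently (re)allocated "normal" block reports true;
     for such a block mhd_pool_reallocate() copies no data. */
  if (mhd_pool_is_resizable_inplace (pool, block, old_size))
    return mhd_pool_reallocate (pool, block, old_size, new_size);
  return block;  /* keep the old block instead of paying for a copy */
}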
-
-
-/**
- * Try to allocate @a size bytes memory area from the @a pool.
- *
- * If the allocation fails, @a required_bytes is updated with the size that
- * needs to be freed in the relocatable area of the @a pool to allocate
- * the requested number of bytes.
- * The allocated memory area is always non-relocatable ("from end").
- *
- * @param pool memory pool to use for the operation
- * @param size the size of memory in bytes to allocate
- * @param[out] required_bytes the pointer to variable to be updated with
- * the size of the required additional free
- * memory area, set to 0 if function succeeds.
- * Cannot be NULL.
- * @return the pointer to the allocated memory area on success,
- *         NULL if the pool doesn't have enough space; @a required_bytes is
- *         then updated with the amount of space that needs to be freed in
- *         the relocatable area, or set to SIZE_MAX if the requested size
- *         is too large for the pool.
- */
-MHD_INTERNAL void *
-mhd_pool_try_alloc (struct mhd_MemoryPool *restrict pool,
- size_t size,
- size_t *restrict required_bytes)
-{
- void *ret;
- size_t asize;
-
- mhd_assert (pool->end >= pool->pos);
- mhd_assert (pool->size >= pool->end - pool->pos);
- mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
- asize = mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
- if ( (0 == asize) && (0 != size) )
- { /* size is too close to SIZE_MAX, very unlikely */
- *required_bytes = SIZE_MAX;
- return NULL;
- }
- if (asize > pool->end - pool->pos)
- {
- mhd_assert ((pool->end - pool->pos) == \
- mhd_ROUND_TO_ALIGN (pool->end - pool->pos));
- if (asize <= pool->end)
- *required_bytes = asize - (pool->end - pool->pos);
- else
- *required_bytes = SIZE_MAX;
- return NULL;
- }
- *required_bytes = 0;
- ret = &pool->memory[pool->end - asize];
- pool->end -= asize;
- mhd_UNPOISON_MEMORY (ret, size);
- return ret;
-}
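
A sketch of interpreting @a required_bytes after a failed call (caller-side
names hypothetical):

static void *
try_alloc_sketch (struct mhd_MemoryPool *pool, size_t size)
{
  size_t missing;
  void *area;

  area = mhd_pool_try_alloc (pool, size, &missing);
  if (NULL != area)
    return area;   /* success: 'missing' has been set to 0 */
  if (SIZE_MAX == missing)
    return NULL;   /* 'size' can never fit in this pool */
  /* Otherwise the caller may shrink relocatable buffers by 'missing'
     bytes (for example via mhd_pool_reallocate()) and retry. */
  return NULL;
}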
-
-
-/**
- * Reallocate a block of memory obtained from the pool.
- * This is particularly efficient when growing or
- * shrinking the block that was last (re)allocated.
- * If the given block is not the most recently
- * (re)allocated block, the memory of the previous
- * allocation may not be released until the pool is
- * destroyed or reset.
- *
- * @param pool memory pool to use for the operation
- * @param old the existing block
- * @param old_size the size of the existing block
- * @param new_size the new size of the block
- * @return new address of the block, or
- * NULL if the pool cannot support @a new_size
- * bytes (old continues to be valid for @a old_size)
- */
-MHD_INTERNAL void *
-mhd_pool_reallocate (struct mhd_MemoryPool *restrict pool,
- void *restrict old,
- size_t old_size,
- size_t new_size)
-{
- size_t asize;
- uint8_t *new_blc;
-
- mhd_assert (pool->end >= pool->pos);
- mhd_assert (pool->size >= pool->end - pool->pos);
- mhd_assert (old != NULL || old_size == 0);
- mhd_assert (pool->size >= old_size);
- mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
-#if defined(MHD_ASAN_POISON_ACTIVE) && defined(HAVE___ASAN_REGION_IS_POISONED)
- mhd_assert (NULL == __asan_region_is_poisoned (old, old_size));
-#endif /* MHD_ASAN_POISON_ACTIVE && HAVE___ASAN_REGION_IS_POISONED */
-
- if (NULL != old)
- { /* Have previously allocated data */
- const size_t old_offset = mp_ptr_diff_ (old, pool->memory);
- const bool shrinking = (old_size > new_size);
-
- mhd_assert (mp_ptr_le_ (pool->memory, old));
- /* (pool->memory + pool->size >= (uint8_t*) old + old_size) */
- mhd_assert ((pool->size - mhd_RED_ZONE_SIZE) >= (old_offset + old_size));
- /* Blocks "from the end" must not be reallocated */
- /* (old_size == 0 || pool->memory + pool->pos > (uint8_t*) old) */
- mhd_assert ((old_size == 0) || \
- (pool->pos > old_offset));
- mhd_assert ((old_size == 0) || \
- ((pool->end - mhd_RED_ZONE_SIZE) >= (old_offset + old_size)));
- /* Try resizing in-place */
- if (shrinking)
- { /* Shrinking in-place, zero-out freed part */
- memset ((uint8_t *) old + new_size, 0, old_size - new_size);
- mhd_POISON_MEMORY ((uint8_t *) old + new_size, old_size - new_size);
- }
- if (pool->pos ==
- mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + old_size))
- { /* "old" block is the last allocated block */
- const size_t new_apos =
- mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + new_size);
- if (! shrinking)
- { /* Grow in-place, check for enough space. */
- if ( (new_apos > pool->end) ||
- (new_apos < pool->pos) ) /* Value wrap */
- return NULL; /* No space */
- }
- /* Resized in-place */
- pool->pos = new_apos;
- mhd_UNPOISON_MEMORY (old, new_size);
- return old;
- }
- if (shrinking)
- return old; /* Resized in-place, freed part remains allocated */
- }
- /* Need to allocate new block */
- asize = mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
- if ( ( (0 == asize) &&
- (0 != new_size) ) || /* Value wrap, too large new_size. */
- (asize > pool->end - pool->pos) ) /* Not enough space */
- return NULL;
-
- new_blc = pool->memory + pool->pos;
- pool->pos += asize;
-
- mhd_UNPOISON_MEMORY (new_blc, new_size);
- if (0 != old_size)
- {
- /* Move data to new block, old block remains allocated */
- memcpy (new_blc, old, old_size);
- /* Zero-out old block */
- memset (old, 0, old_size);
- mhd_POISON_MEMORY (old, old_size);
- }
- return new_blc;
-}
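
A growth sketch (names hypothetical) that relies on the documented
guarantee that the old block stays valid when NULL is returned:

static char *
grow_buffer (struct mhd_MemoryPool *pool,
             char *buf,
             size_t used,
             size_t wanted)
{
  char *nbuf = (char *) mhd_pool_reallocate (pool, buf, used, wanted);
  if (NULL == nbuf)
    return buf;  /* no space: 'buf' is still valid for 'used' bytes */
  /* If 'buf' was the last (re)allocated block then nbuf == buf and no
     data was copied; otherwise the old region was zeroed and poisoned. */
  return nbuf;
}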
-
-
-/**
- * Deallocate a block of memory obtained from the pool.
- *
- * If the given block is not the most recently
- * (re)allocated block, the memory of this block
- * may not be released until the pool is
- * destroyed or reset.
- *
- * @param pool memory pool to use for the operation
- * @param block the allocated block; NULL is tolerated
- * @param block_size the size of the allocated block
- */
-MHD_INTERNAL void
-mhd_pool_deallocate (struct mhd_MemoryPool *restrict pool,
- void *restrict block,
- size_t block_size)
-{
- mhd_assert (pool->end >= pool->pos);
- mhd_assert (pool->size >= pool->end - pool->pos);
- mhd_assert (block != NULL || block_size == 0);
- mhd_assert (pool->size >= block_size);
- mhd_assert (pool->pos == mhd_ROUND_TO_ALIGN (pool->pos));
-
- if (NULL != block)
- { /* Have previously allocated data */
- const size_t block_offset = mp_ptr_diff_ (block, pool->memory);
- mhd_assert (mp_ptr_le_ (pool->memory, block));
- mhd_assert (block_offset <= pool->size);
- mhd_assert ((block_offset != pool->pos) || (block_size == 0));
- /* Zero-out deallocated region */
- if (0 != block_size)
- {
- memset (block, 0, block_size);
- mhd_POISON_MEMORY (block, block_size);
- }
-#if ! defined(MHD_FAVOR_SMALL_CODE) && ! defined(MHD_ASAN_POISON_ACTIVE)
- else
- return; /* Zero size, no need to do anything */
-#endif /* ! MHD_FAVOR_SMALL_CODE && ! MHD_ASAN_POISON_ACTIVE */
- if (block_offset <= pool->pos)
- {
- /* "Normal" block, not allocated "from the end". */
- const size_t alg_end =
- mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size);
- mhd_assert (alg_end <= pool->pos);
- if (alg_end == pool->pos)
- {
- /* The last allocated block, return deallocated block to the pool */
- size_t alg_start = mhd_ROUND_TO_ALIGN (block_offset);
- mhd_assert (alg_start >= block_offset);
-#if defined(MHD_ASAN_POISON_ACTIVE)
- if (alg_start != block_offset)
- {
- mhd_POISON_MEMORY (pool->memory + block_offset, \
- alg_start - block_offset);
- }
- else if (0 != alg_start)
- {
- bool need_red_zone_before;
- mhd_assert (mhd_RED_ZONE_SIZE <= alg_start);
-#if defined(HAVE___ASAN_REGION_IS_POISONED)
- need_red_zone_before =
- (NULL == __asan_region_is_poisoned (pool->memory
- + alg_start
- - mhd_RED_ZONE_SIZE,
- mhd_RED_ZONE_SIZE));
-#elif defined(HAVE___ASAN_ADDRESS_IS_POISONED)
- need_red_zone_before =
- (0 == __asan_address_is_poisoned (pool->memory + alg_start - 1));
-#else /* ! HAVE___ASAN_ADDRESS_IS_POISONED */
- need_red_zone_before = true; /* Unknown, assume new red zone needed */
-#endif /* ! HAVE___ASAN_ADDRESS_IS_POISONED */
- if (need_red_zone_before)
- {
- mhd_POISON_MEMORY (pool->memory + alg_start, mhd_RED_ZONE_SIZE);
- alg_start += mhd_RED_ZONE_SIZE;
- }
- }
-#endif /* MHD_ASAN_POISON_ACTIVE */
- mhd_assert (alg_start <= pool->pos);
- mhd_assert (alg_start == mhd_ROUND_TO_ALIGN (alg_start));
- pool->pos = alg_start;
- }
- }
- else
- {
- /* Allocated "from the end" block. */
- /* The size and the pointers of such block should not be manipulated by
- MHD code (block split is disallowed). */
- mhd_assert (block_offset >= pool->end);
- mhd_assert (mhd_ROUND_TO_ALIGN (block_offset) == block_offset);
- if (block_offset == pool->end)
- {
- /* The last allocated block, return deallocated block to the pool */
- const size_t alg_end =
- mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (block_offset + block_size);
- pool->end = alg_end;
- }
- }
- }
-}
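
A LIFO sketch: only deallocating the most recently (re)allocated block
actually rolls the pool position back; other blocks are merely zeroed
until the pool is reset:

static void
lifo_dealloc_sketch (struct mhd_MemoryPool *pool)
{
  void *a = mhd_pool_allocate (pool, 64, false);
  void *b = mhd_pool_allocate (pool, 64, false);

  if ((NULL == a) || (NULL == b))
    return;
  mhd_pool_deallocate (pool, b, 64);  /* last block: space is reusable */
  mhd_pool_deallocate (pool, a, 64);  /* now the last block again */
}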
-
-
-/**
- * Clear all entries from the memory pool except
- * for @a keep of the given @a copy_bytes. The pointer
- * returned should be a buffer of @a new_size where
- * the first @a copy_bytes are from @a keep.
- *
- * @param pool memory pool to use for the operation
- * @param keep pointer to the entry to keep (may be NULL)
- * @param copy_bytes how many bytes need to be kept at this address
- * @param new_size how many bytes should the allocation we return have?
- *                 (must be greater than or equal to @a copy_bytes)
- * @return the new address of @a keep (if it had to change)
- */
-MHD_INTERNAL void *
-mhd_pool_reset (struct mhd_MemoryPool *restrict pool,
- void *restrict keep,
- size_t copy_bytes,
- size_t new_size)
-{
- mhd_assert (pool->end >= pool->pos);
- mhd_assert (pool->size >= pool->end - pool->pos);
- mhd_assert (copy_bytes <= new_size);
- mhd_assert (copy_bytes <= pool->size);
- mhd_assert (keep != NULL || copy_bytes == 0);
- mhd_assert (keep == NULL || mp_ptr_le_ (pool->memory, keep));
- /* (keep == NULL || pool->memory + pool->size >= (uint8_t*) keep + copy_bytes) */
- mhd_assert ((keep == NULL) || \
- (pool->size >= mp_ptr_diff_ (keep, pool->memory) + copy_bytes));
-#if defined(MHD_ASAN_POISON_ACTIVE) && defined(HAVE___ASAN_REGION_IS_POISONED)
- mhd_assert (NULL == __asan_region_is_poisoned (keep, copy_bytes));
-#endif /* MHD_ASAN_POISON_ACTIVE && HAVE___ASAN_REGION_IS_POISONED */
- mhd_UNPOISON_MEMORY (pool->memory, new_size);
- if ( (NULL != keep) &&
- (keep != pool->memory) )
- {
- if (0 != copy_bytes)
- memmove (pool->memory,
- keep,
- copy_bytes);
- }
- /* technically not needed, but safer to zero out */
- if (pool->size > copy_bytes)
- {
-    size_t to_zero; /**< Size of the area to zero-out */
-
- to_zero = pool->size - copy_bytes;
- mhd_UNPOISON_MEMORY (pool->memory + copy_bytes, to_zero);
-#ifdef _WIN32
- if (pool->is_mmap)
- {
-      size_t to_recommit; /**< Size of the decommitted and re-committed area. */
- uint8_t *recommit_addr;
- /* Round down to page size */
- to_recommit = to_zero - to_zero % MHD_sys_page_size_;
- recommit_addr = pool->memory + pool->size - to_recommit;
-
- /* De-committing and re-committing again clear memory and make
- * pages free / available for other needs until accessed. */
- if (VirtualFree (recommit_addr,
- to_recommit,
- MEM_DECOMMIT))
- {
- to_zero -= to_recommit;
-
- if (recommit_addr != VirtualAlloc (recommit_addr,
- to_recommit,
- MEM_COMMIT,
- PAGE_READWRITE))
- abort (); /* Serious error, must never happen */
- }
- }
-#endif /* _WIN32 */
- memset (&pool->memory[copy_bytes],
- 0,
- to_zero);
- }
- pool->pos = mhd_ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
- pool->end = pool->size;
- mhd_POISON_MEMORY (((uint8_t *) pool->memory) + new_size, \
- pool->size - new_size);
- return pool->memory;
-}
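
A sketch of the typical use between requests on a connection (parameter
names hypothetical): everything is dropped except the first 'have' bytes
of the read buffer, which end up at the start of the pool:

static void *
recycle_between_requests (struct mhd_MemoryPool *pool,
                          void *read_buf,
                          size_t have,
                          size_t want)
{
  /* Kept data is moved to the start of the pool; the remainder is
     zeroed (or decommitted and re-committed on W32) before reuse. */
  return mhd_pool_reset (pool, read_buf, have, want);
}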
-
-
-/* end of memorypool.c */
diff --git a/src/mhd2/mhd_mempool.h b/src/mhd2/mhd_mempool.h
@@ -1,197 +0,0 @@
-/*
- This file is part of libmicrohttpd
- Copyright (C) 2007--2024 Daniel Pittman and Christian Grothoff
- Copyright (C) 2016--2024 Evgeny Grin (Karlson2k)
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this library; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-*/
-
-/**
- * @file src/mhd2/mhd_mempool.h
- * @brief memory pool; mostly used for efficient (de)allocation
- * for each connection and bounding memory use for each
- * request
- * @author Christian Grothoff
- * @author Karlson2k (Evgeny Grin)
- */
-
-#ifndef MHD_MEMPOOL_H
-#define MHD_MEMPOOL_H 1
-
-#include "mhd_sys_options.h"
-#include "sys_base_types.h"
-#include "sys_bool_type.h"
-
-/**
- * Opaque handle for a memory pool.
- * Pools are not reentrant and must not be used
- * by multiple threads.
- */
-struct mhd_MemoryPool;
-
-/**
- * Initialize values for memory pools
- */
-void
-mhd_init_mem_pools (void);
-
-
-/**
- * Create a memory pool.
- *
- * @param max maximum size of the pool
- * @return NULL on error
- */
-MHD_INTERNAL struct mhd_MemoryPool *
-mdh_pool_create (size_t max);
-
-
-/**
- * Destroy a memory pool.
- *
- * @param pool memory pool to destroy
- */
-MHD_INTERNAL void
-mhd_pool_destroy (struct mhd_MemoryPool *restrict pool);
-
-
-/**
- * Allocate size bytes from the pool.
- *
- * @param pool memory pool to use for the operation
- * @param size number of bytes to allocate
- * @param from_end allocate from end of pool (set to 'true');
- * use this for small, persistent allocations that
- * will never be reallocated
- * @return pointer to the allocated memory area, or
- *         NULL if the pool cannot support @a size more bytes
- */
-MHD_INTERNAL void *
-mhd_pool_allocate (struct mhd_MemoryPool *restrict pool,
- size_t size,
- bool from_end);
-
-/**
- * Checks whether the allocated block is re-sizable in-place.
- * If the block is not re-sizable in-place, it can still be shrunk, but
- * the freed memory will not be re-used until the pool is reset.
- * @param pool the memory pool to use
- * @param block the pointer to the allocated block to check
- * @param block_size the size of the allocated @a block
- * @return true if block can be resized in-place in the optimal way,
- * false otherwise
- */
-MHD_INTERNAL bool
-mhd_pool_is_resizable_inplace (struct mhd_MemoryPool *restrict pool,
- void *restrict block,
- size_t block_size);
-
-/**
- * Try to allocate @a size bytes memory area from the @a pool.
- *
- * If the allocation fails, @a required_bytes is updated with the size that
- * needs to be freed in the relocatable area of the @a pool to allocate
- * the requested number of bytes.
- * The allocated memory area is always non-relocatable ("from end").
- *
- * @param pool memory pool to use for the operation
- * @param size the size of memory in bytes to allocate
- * @param[out] required_bytes the pointer to variable to be updated with
- * the size of the required additional free
- * memory area, set to 0 if function succeeds.
- * Cannot be NULL.
- * @return the pointer to the allocated memory area on success,
- *         NULL if the pool doesn't have enough space; @a required_bytes is
- *         then updated with the amount of space that needs to be freed in
- *         the relocatable area, or set to SIZE_MAX if the requested size
- *         is too large for the pool.
- */
-MHD_INTERNAL void *
-mhd_pool_try_alloc (struct mhd_MemoryPool *restrict pool,
- size_t size,
- size_t *restrict required_bytes);
-
-
-/**
- * Reallocate a block of memory obtained from the pool.
- * This is particularly efficient when growing or
- * shrinking the block that was last (re)allocated.
- * If the given block is not the most recently
- * (re)allocated block, the memory of the previous
- * allocation may not be released until the pool is
- * destroyed or reset.
- *
- * @param pool memory pool to use for the operation
- * @param old the existing block
- * @param old_size the size of the existing block
- * @param new_size the new size of the block
- * @return new address of the block, or
- * NULL if the pool cannot support @a new_size
- * bytes (old continues to be valid for @a old_size)
- */
-MHD_INTERNAL void *
-mhd_pool_reallocate (struct mhd_MemoryPool *restrict pool,
- void *restrict old,
- size_t old_size,
- size_t new_size);
-
-
-/**
- * Check how much memory is left in the @a pool
- *
- * @param pool pool to check
- * @return number of bytes still available in @a pool
- */
-MHD_INTERNAL size_t
-mhd_pool_get_free (struct mhd_MemoryPool *restrict pool);
-
-
-/**
- * Deallocate a block of memory obtained from the pool.
- *
- * If the given block is not the most recently
- * (re)allocated block, the memory of this block
- * may not be released until the pool is
- * destroyed or reset.
- *
- * @param pool memory pool to use for the operation
- * @param block the allocated block; NULL is tolerated
- * @param block_size the size of the allocated block
- */
-MHD_INTERNAL void
-mhd_pool_deallocate (struct mhd_MemoryPool *restrict pool,
- void *restrict block,
- size_t block_size);
-
-
-/**
- * Clear all entries from the memory pool except
- * for @a keep of the given @a copy_bytes. The pointer
- * returned should be a buffer of @a new_size where
- * the first @a copy_bytes are from @a keep.
- *
- * @param pool memory pool to use for the operation
- * @param keep pointer to the entry to keep (may be NULL)
- * @param copy_bytes how many bytes need to be kept at this address
- * @param new_size how many bytes should the allocation we return have?
- *                 (must be greater than or equal to @a copy_bytes)
- * @return the new address of @a keep (if it had to change)
- */
-MHD_INTERNAL void *
-mhd_pool_reset (struct mhd_MemoryPool *restrict pool,
- void *restrict keep,
- size_t copy_bytes,
- size_t new_size);
-
-#endif /* ! MHD_MEMPOOL_H */
diff --git a/src/mhd2/respond_with_error.c b/src/mhd2/respond_with_error.c
@@ -35,7 +35,7 @@
#include "mhd_connection.h"
-#include "mhd_mempool.h"
+#include "mempool_funcs.h"
#include "response_from.h"
#include "daemon_logger.h"
diff --git a/src/mhd2/stream_funcs.c b/src/mhd2/stream_funcs.c
@@ -44,7 +44,7 @@
#include "mhd_daemon.h"
#include "mhd_connection.h"
#include "mhd_response.h"
-#include "mhd_mempool.h"
+#include "mempool_funcs.h"
#include "mhd_str.h"
#include "mhd_str_macros.h"
diff --git a/src/mhd2/stream_process_request.c b/src/mhd2/stream_process_request.c
@@ -56,7 +56,7 @@
#include "daemon_logger.h"
#include "mhd_panic.h"
-#include "mhd_mempool.h"
+#include "mempool_funcs.h"
#include "request_funcs.h"
#include "request_get_value.h"