path: root/src/microhttpd/memorypool.c
Diffstat (limited to 'src/microhttpd/memorypool.c')
-rw-r--r--  src/microhttpd/memorypool.c  239
1 file changed, 122 insertions(+), 117 deletions(-)
diff --git a/src/microhttpd/memorypool.c b/src/microhttpd/memorypool.c
index 96c20ea6..ddfd08d3 100644
--- a/src/microhttpd/memorypool.c
+++ b/src/microhttpd/memorypool.c
@@ -45,13 +45,13 @@
 #endif /* HAVE_SYSCONF */
 
 /* define MAP_ANONYMOUS for Mac OS X */
-#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
+#if defined(MAP_ANON) && ! defined(MAP_ANONYMOUS)
 #define MAP_ANONYMOUS MAP_ANON
 #endif
 #if defined(_WIN32)
 #define MAP_FAILED NULL
-#elif !defined(MAP_FAILED)
-#define MAP_FAILED ((void*)-1)
+#elif ! defined(MAP_FAILED)
+#define MAP_FAILED ((void*) -1)
 #endif
 
 /**
@@ -62,7 +62,8 @@
 /**
  * Round up 'n' to a multiple of ALIGN_SIZE.
  */
-#define ROUND_TO_ALIGN(n) (((n)+(ALIGN_SIZE-1)) / (ALIGN_SIZE) * (ALIGN_SIZE))
+#define ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) \
+                           / (ALIGN_SIZE) *(ALIGN_SIZE))
 
 #if defined(PAGE_SIZE)
 #define MHD_DEF_PAGE_SIZE_ PAGE_SIZE
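The reformatted macro is behaviorally identical to the old one-liner: it rounds n up to the next multiple of ALIGN_SIZE using truncating integer division. A minimal sketch of the arithmetic, assuming a hypothetical ALIGN_SIZE of 8 (the real value is defined elsewhere in memorypool.c):

#include <stdio.h>
#include <stddef.h>

#define ALIGN_SIZE 8  /* hypothetical value for illustration only */
#define ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) \
                           / (ALIGN_SIZE) *(ALIGN_SIZE))

int main (void)
{
  /* (n + 7) / 8 * 8: 1..8 -> 8, 9..16 -> 16, exact multiples unchanged. */
  printf ("%zu %zu %zu\n",
          (size_t) ROUND_TO_ALIGN (1),   /* prints 8 */
          (size_t) ROUND_TO_ALIGN (8),   /* prints 8 */
          (size_t) ROUND_TO_ALIGN (9));  /* prints 16 */
  return 0;
}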
@@ -93,7 +94,7 @@ MHD_init_mem_pools_ (void)
 #elif defined(_WIN32)
   SYSTEM_INFO si;
   GetSystemInfo (&si);
-  MHD_sys_page_size_ = (size_t)si.dwPageSize;
+  MHD_sys_page_size_ = (size_t) si.dwPageSize;
 #else
   MHD_sys_page_size_ = MHD_DEF_PAGE_SIZE_;
 #endif /* _WIN32 */
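For context: MHD_init_mem_pools_ detects the system page size once at startup, preferring sysconf() where HAVE_SYSCONF is set, and falling back to GetSystemInfo() on Windows or a compile-time default otherwise. A POSIX-only sketch of the same probe (probe_page_size is an illustrative name, not the library's):

#include <unistd.h>
#include <stddef.h>

/* Illustrative stand-in for the #ifdef cascade in MHD_init_mem_pools_. */
static size_t
probe_page_size (void)
{
  long sz = sysconf (_SC_PAGESIZE);
  /* Fall back to a common default if the query fails. */
  return (0 < sz) ? (size_t) sz : 4096;
}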
@@ -151,45 +152,47 @@ MHD_pool_create (size_t max)
 #if defined(MAP_ANONYMOUS) || defined(_WIN32)
   if ( (max <= 32 * 1024) ||
        (max < MHD_sys_page_size_ * 4 / 3) )
+  {
     pool->memory = MAP_FAILED;
+  }
   else
   {
     /* Round up allocation to page granularity. */
     alloc_size = max + MHD_sys_page_size_ - 1;
     alloc_size -= alloc_size % MHD_sys_page_size_;
-#if defined(MAP_ANONYMOUS) && !defined(_WIN32)
+#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
     pool->memory = mmap (NULL,
                          alloc_size,
                          PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS,
                          -1,
                          0);
 #elif defined(_WIN32)
     pool->memory = VirtualAlloc (NULL,
                                  alloc_size,
                                  MEM_COMMIT | MEM_RESERVE,
                                  PAGE_READWRITE);
 #endif /* _WIN32 */
   }
 #else /* ! _WIN32 && ! MAP_ANONYMOUS */
   pool->memory = MAP_FAILED;
 #endif /* ! _WIN32 && ! MAP_ANONYMOUS */
   if (MAP_FAILED == pool->memory)
+  {
+    alloc_size = ROUND_TO_ALIGN (max);
+    pool->memory = malloc (alloc_size);
+    if (NULL == pool->memory)
     {
-      alloc_size = ROUND_TO_ALIGN(max);
-      pool->memory = malloc (alloc_size);
-      if (NULL == pool->memory)
-        {
-          free (pool);
-          return NULL;
-        }
-      pool->is_mmap = false;
+      free (pool);
+      return NULL;
     }
+    pool->is_mmap = false;
+  }
 #if defined(MAP_ANONYMOUS) || defined(_WIN32)
   else
-    {
-      pool->is_mmap = true;
-    }
+  {
+    pool->is_mmap = true;
+  }
 #endif /* _WIN32 || MAP_ANONYMOUS */
   pool->pos = 0;
   pool->end = alloc_size;
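The restructured MHD_pool_create keeps its original strategy: pools too small to justify page granularity go straight to malloc(), while larger ones try an anonymous mapping first and fall back to malloc() if the mapping fails. A standalone POSIX sketch of that fallback pattern (alloc_pool_memory is an illustrative name, and MAP_ANONYMOUS may require a feature-test macro on some libcs):

#include <stdbool.h>
#include <stdlib.h>
#include <sys/mman.h>

/* Try an anonymous mapping, fall back to the heap; report which one won. */
static void *
alloc_pool_memory (size_t alloc_size, bool *is_mmap)
{
  void *mem = mmap (NULL, alloc_size,
                    PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (MAP_FAILED != mem)
  {
    *is_mmap = true;
    return mem;
  }
  *is_mmap = false;
  return malloc (alloc_size);  /* may still be NULL; caller must check */
}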
@@ -211,10 +214,10 @@ MHD_pool_destroy (struct MemoryPool *pool)
 
   mhd_assert (pool->end >= pool->pos);
   mhd_assert (pool->size >= pool->end - pool->pos);
-  if (!pool->is_mmap)
+  if (! pool->is_mmap)
     free (pool->memory);
   else
-#if defined(MAP_ANONYMOUS) && !defined(_WIN32)
+#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
     munmap (pool->memory,
             pool->size);
 #elif defined(_WIN32)
@@ -256,7 +259,7 @@ MHD_pool_get_free (struct MemoryPool *pool)
  */
 void *
 MHD_pool_allocate (struct MemoryPool *pool,
-		   size_t size,
+                   size_t size,
                    bool from_end)
 {
   void *ret;
@@ -271,15 +274,15 @@ MHD_pool_allocate (struct MemoryPool *pool,
        (pool->pos + asize < pool->pos))
     return NULL;
   if (from_end)
-    {
-      ret = &pool->memory[pool->end - asize];
-      pool->end -= asize;
-    }
+  {
+    ret = &pool->memory[pool->end - asize];
+    pool->end -= asize;
+  }
   else
-    {
-      ret = &pool->memory[pool->pos];
-      pool->pos += asize;
-    }
+  {
+    ret = &pool->memory[pool->pos];
+    pool->pos += asize;
+  }
   return ret;
 }
 
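MHD_pool_allocate, reindented above, is a two-ended bump allocator: regular blocks advance pos upward, from_end blocks pull end downward, and allocation fails once the two cursors would cross. A reduced sketch of the idea (the real function also rounds size via ROUND_TO_ALIGN and guards against overflow):

#include <stdbool.h>
#include <stddef.h>

struct bump_pool
{
  unsigned char *memory;  /* fixed backing buffer */
  size_t pos;             /* front cursor, grows upward */
  size_t end;             /* back cursor, grows downward */
};

static void *
bump_alloc (struct bump_pool *p, size_t asize, bool from_end)
{
  if (asize > p->end - p->pos)  /* no room left between the cursors */
    return NULL;
  if (from_end)
  {
    p->end -= asize;
    return &p->memory[p->end];
  }
  p->pos += asize;
  return &p->memory[p->pos - asize];
}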
@@ -304,8 +307,8 @@ MHD_pool_allocate (struct MemoryPool *pool,
 void *
 MHD_pool_reallocate (struct MemoryPool *pool,
                      void *old,
-		     size_t old_size,
-		     size_t new_size)
+                     size_t old_size,
+                     size_t new_size)
 {
   size_t asize;
   uint8_t *new_blc;
@@ -313,36 +316,37 @@ MHD_pool_reallocate (struct MemoryPool *pool,
   mhd_assert (pool->end >= pool->pos);
   mhd_assert (pool->size >= pool->end - pool->pos);
   mhd_assert (old != NULL || old_size == 0);
-  mhd_assert (old == NULL || pool->memory <= (uint8_t*)old);
-  mhd_assert (old == NULL || pool->memory + pool->size >= (uint8_t*)old + old_size);
+  mhd_assert (old == NULL || pool->memory <= (uint8_t*) old);
+  mhd_assert (old == NULL || pool->memory + pool->size >= (uint8_t*) old
+              + old_size);
   /* Blocks "from the end" must not be reallocated */
-  mhd_assert (old == NULL || pool->memory + pool->pos > (uint8_t*)old);
+  mhd_assert (old == NULL || pool->memory + pool->pos > (uint8_t*) old);
 
   if (0 != old_size)
   { /* Need to save some data */
-    const size_t old_offset = (uint8_t*)old - pool->memory;
+    const size_t old_offset = (uint8_t*) old - pool->memory;
     const bool shrinking = (old_size > new_size);
     /* Try resizing in-place */
     if (shrinking)
     { /* Shrinking in-place, zero-out freed part */
-      memset ((uint8_t*)old + new_size, 0, old_size - new_size);
+      memset ((uint8_t*) old + new_size, 0, old_size - new_size);
     }
     if (pool->pos == ROUND_TO_ALIGN (old_offset + old_size))
     { /* "old" block is the last allocated block */
       const size_t new_apos = ROUND_TO_ALIGN (old_offset + new_size);
-      if (!shrinking)
+      if (! shrinking)
       { /* Grow in-place, check for enough space. */
         if ( (new_apos > pool->end) ||
              (new_apos < pool->pos) ) /* Value wrap */
           return NULL; /* No space */
       }
       /* Resized in-place */
       pool->pos = new_apos;
       return old;
-      }
-    if (shrinking)
-      return old; /* Resized in-place, freed part remains allocated */
     }
+    if (shrinking)
+      return old; /* Resized in-place, freed part remains allocated */
+  }
   /* Need to allocate new block */
   asize = ROUND_TO_ALIGN (new_size);
   if ( ( (0 == asize) &&
@@ -354,12 +358,12 @@ MHD_pool_reallocate (struct MemoryPool *pool,
   pool->pos += asize;
 
   if (0 != old_size)
-    {
-      /* Move data to new block, old block remains allocated */
-      memcpy (new_blc, old, old_size);
-      /* Zero-out old block */
-      memset (old, 0, old_size);
-    }
+  {
+    /* Move data to new block, old block remains allocated */
+    memcpy (new_blc, old, old_size);
+    /* Zero-out old block */
+    memset (old, 0, old_size);
+  }
   return new_blc;
 }
 
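Taken together, the two hunks above preserve MHD_pool_reallocate's contract: only the most recently allocated front block can grow in place, shrinking always succeeds in place, and anything else gets a fresh block, a copy, and a zeroed-out original. A hypothetical usage sketch, assuming the pool API declared in the library's memorypool.h (error handling omitted for brevity):

#include <stdbool.h>
#include "memorypool.h"  /* assumed: the header for the API in this diff */

static void
realloc_contract_demo (void)
{
  struct MemoryPool *pool = MHD_pool_create (1024);
  char *a = MHD_pool_allocate (pool, 16, false);
  char *b = MHD_pool_allocate (pool, 16, false);
  /* 'b' is the last front block, so growing it only moves pool->pos. */
  char *b2 = MHD_pool_reallocate (pool, b, 16, 32);  /* expect b2 == b */
  /* 'a' is not last: a new block is carved out, the data is copied,
   * and the old 16 bytes are zeroed but stay allocated. */
  char *a2 = MHD_pool_reallocate (pool, a, 16, 32);  /* expect a2 != a */
  (void) a2;
  (void) b2;
  MHD_pool_destroy (pool);
}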
@@ -379,59 +383,60 @@ MHD_pool_reallocate (struct MemoryPool *pool,
  */
 void *
 MHD_pool_reset (struct MemoryPool *pool,
-		void *keep,
-		size_t copy_bytes,
-		size_t new_size)
+                void *keep,
+                size_t copy_bytes,
+                size_t new_size)
 {
   mhd_assert (pool->end >= pool->pos);
   mhd_assert (pool->size >= pool->end - pool->pos);
   mhd_assert (copy_bytes < new_size);
   mhd_assert (keep != NULL || copy_bytes == 0);
-  mhd_assert (keep == NULL || pool->memory <= (uint8_t*)keep);
-  mhd_assert (keep == NULL || pool->memory + pool->size >= (uint8_t*)keep + copy_bytes);
+  mhd_assert (keep == NULL || pool->memory <= (uint8_t*) keep);
+  mhd_assert (keep == NULL || pool->memory + pool->size >= (uint8_t*) keep
+              + copy_bytes);
   if ( (NULL != keep) &&
        (keep != pool->memory) )
   {
     if (0 != copy_bytes)
       memmove (pool->memory,
                keep,
                copy_bytes);
   }
   /* technically not needed, but safer to zero out */
   if (pool->size > copy_bytes)
   {
     size_t to_zero; /** Size of area to zero-out */
 
     to_zero = pool->size - copy_bytes;
 #ifdef _WIN32
     if (pool->is_mmap)
     {
       size_t to_recommit; /** Size of decommitted and re-committed area. */
       uint8_t *recommit_addr;
       /* Round down to page size */
       to_recommit = to_zero - to_zero % MHD_sys_page_size_;
       recommit_addr = pool->memory + pool->size - to_recommit;
 
       /* De-committing and re-committing again clear memory and make
        * pages free / available for other needs until accessed. */
       if (VirtualFree (recommit_addr,
                        to_recommit,
                        MEM_DECOMMIT))
       {
         to_zero -= to_recommit;
 
         if (recommit_addr != VirtualAlloc (recommit_addr,
                                            to_recommit,
                                            MEM_COMMIT,
                                            PAGE_READWRITE))
-          abort(); /* Serious error, must never happen */
+          abort (); /* Serious error, must never happen */
       }
-      }
-#endif /* _WIN32 */
-      memset (&pool->memory[copy_bytes],
-              0,
-              to_zero);
     }
+#endif /* _WIN32 */
+    memset (&pool->memory[copy_bytes],
+            0,
+            to_zero);
+  }
   pool->pos = ROUND_TO_ALIGN (new_size);
   pool->end = pool->size;
   return pool->memory;
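For the Windows branch shown above: de-committing pages with VirtualFree(MEM_DECOMMIT) and re-committing them with VirtualAlloc(MEM_COMMIT) hands the physical pages back to the OS, and Win32 guarantees that freshly committed pages read back as zero, so only the non-page-aligned remainder still needs an explicit memset. A minimal illustration of the pattern (recommit_zeroed is an illustrative name, Win32 only):

#ifdef _WIN32
#include <stddef.h>
#include <windows.h>

/* Decommit, then recommit, a page-aligned span: on success the span is
 * zero-filled by the OS, with no explicit memset required. */
static int
recommit_zeroed (void *addr, size_t len)
{
  if (! VirtualFree (addr, len, MEM_DECOMMIT))
    return 0;  /* pages untouched; caller can fall back to memset */
  return addr == VirtualAlloc (addr, len, MEM_COMMIT, PAGE_READWRITE);
}
#endif /* _WIN32 */

If the re-commit itself fails after a successful decommit, the pool's backing store is gone, which is why the real code treats that case as fatal via abort().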