diff options
Diffstat (limited to 'src/microhttpd/memorypool.c')
-rw-r--r-- | src/microhttpd/memorypool.c | 75 |
1 files changed, 61 insertions, 14 deletions
diff --git a/src/microhttpd/memorypool.c b/src/microhttpd/memorypool.c index fb6c0652..0f71ab1b 100644 --- a/src/microhttpd/memorypool.c +++ b/src/microhttpd/memorypool.c | |||
@@ -45,6 +45,11 @@ | |||
45 | #define MHD_SC_PAGESIZE _SC_PAGESIZE | 45 | #define MHD_SC_PAGESIZE _SC_PAGESIZE |
46 | #endif /* _SC_PAGESIZE */ | 46 | #endif /* _SC_PAGESIZE */ |
47 | #endif /* HAVE_SYSCONF */ | 47 | #endif /* HAVE_SYSCONF */ |
48 | #include "mhd_limits.h" /* for SIZE_MAX */ | ||
49 | |||
50 | #ifdef MHD_ASAN_POISON_ACTIVE | ||
51 | #include <sanitizer/asan_interface.h> | ||
52 | #endif /* MHD_ASAN_POISON_ACTIVE */ | ||
48 | 53 | ||
49 | /* define MAP_ANONYMOUS for Mac OS X */ | 54 | /* define MAP_ANONYMOUS for Mac OS X */ |
50 | #if defined(MAP_ANON) && ! defined(MAP_ANONYMOUS) | 55 | #if defined(MAP_ANON) && ! defined(MAP_ANONYMOUS) |
@@ -67,6 +72,28 @@ | |||
67 | #define ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) \ | 72 | #define ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) \ |
68 | / (ALIGN_SIZE) *(ALIGN_SIZE)) | 73 | / (ALIGN_SIZE) *(ALIGN_SIZE)) |
69 | 74 | ||
75 | |||
76 | #ifndef MHD_ASAN_POISON_ACTIVE | ||
77 | #define _MHD_NOSANITIZE_PTRS /**/ | ||
78 | #define _MHD_RED_ZONE_SIZE (0) | ||
79 | #define ROUND_TO_ALIGN_PLUS_RED_ZONE(n) ROUND_TO_ALIGN(n) | ||
80 | #define _MHD_POISON_MEMORY(pointer, size) /**/ | ||
81 | #define _MHD_UNPOISON_MEMORY(pointer, size) /**/ | ||
82 | #else /* MHD_ASAN_POISON_ACTIVE */ | ||
83 | #if defined(FUNC_ATTR_PTRCOMPARE_WOKRS) | ||
84 | #define _MHD_NOSANITIZE_PTRS \ | ||
85 | __attribute__((no_sanitize("pointer-compare","pointer-subtract"))) | ||
86 | #elif defined(FUNC_ATTR_NOSANITIZE_WORKS) | ||
87 | #define _MHD_NOSANITIZE_PTRS __attribute__((no_sanitize("address"))) | ||
88 | #endif | ||
89 | #define _MHD_RED_ZONE_SIZE (ALIGN_SIZE) | ||
90 | #define ROUND_TO_ALIGN_PLUS_RED_ZONE(n) (ROUND_TO_ALIGN(n) + _MHD_RED_ZONE_SIZE) | ||
91 | #define _MHD_POISON_MEMORY(pointer, size) \ | ||
92 | ASAN_POISON_MEMORY_REGION ((pointer), (size)) | ||
93 | #define _MHD_UNPOISON_MEMORY(pointer, size) \ | ||
94 | ASAN_UNPOISON_MEMORY_REGION ((pointer), (size)) | ||
95 | #endif /* MHD_ASAN_POISON_ACTIVE */ | ||
96 | |||
70 | #if defined(PAGE_SIZE) && (0 < (PAGE_SIZE + 0)) | 97 | #if defined(PAGE_SIZE) && (0 < (PAGE_SIZE + 0)) |
71 | #define MHD_DEF_PAGE_SIZE_ PAGE_SIZE | 98 | #define MHD_DEF_PAGE_SIZE_ PAGE_SIZE |
72 | #elif defined(PAGESIZE) && (0 < (PAGESIZE + 0)) | 99 | #elif defined(PAGESIZE) && (0 < (PAGESIZE + 0)) |
@@ -205,6 +232,7 @@ MHD_pool_create (size_t max) | |||
205 | pool->end = alloc_size; | 232 | pool->end = alloc_size; |
206 | pool->size = alloc_size; | 233 | pool->size = alloc_size; |
207 | mhd_assert (0 < alloc_size); | 234 | mhd_assert (0 < alloc_size); |
235 | _MHD_POISON_MEMORY (pool->memory, pool->size); | ||
208 | return pool; | 236 | return pool; |
209 | } | 237 | } |
210 | 238 | ||
@@ -222,6 +250,7 @@ MHD_pool_destroy (struct MemoryPool *pool) | |||
222 | 250 | ||
223 | mhd_assert (pool->end >= pool->pos); | 251 | mhd_assert (pool->end >= pool->pos); |
224 | mhd_assert (pool->size >= pool->end - pool->pos); | 252 | mhd_assert (pool->size >= pool->end - pool->pos); |
253 | _MHD_POISON_MEMORY (pool->memory, pool->size); | ||
225 | if (! pool->is_mmap) | 254 | if (! pool->is_mmap) |
226 | free (pool->memory); | 255 | free (pool->memory); |
227 | else | 256 | else |
@@ -250,7 +279,11 @@ MHD_pool_get_free (struct MemoryPool *pool) | |||
250 | { | 279 | { |
251 | mhd_assert (pool->end >= pool->pos); | 280 | mhd_assert (pool->end >= pool->pos); |
252 | mhd_assert (pool->size >= pool->end - pool->pos); | 281 | mhd_assert (pool->size >= pool->end - pool->pos); |
253 | return (pool->end - pool->pos); | 282 | #ifdef MHD_ASAN_POISON_ACTIVE |
283 | if ((pool->end - pool->pos) <= _MHD_RED_ZONE_SIZE) | ||
284 | return 0; | ||
285 | #endif /* MHD_ASAN_POISON_ACTIVE */ | ||
286 | return (pool->end - pool->pos) - _MHD_RED_ZONE_SIZE; | ||
254 | } | 287 | } |
255 | 288 | ||
256 | 289 | ||
@@ -275,7 +308,7 @@ MHD_pool_allocate (struct MemoryPool *pool, | |||
275 | 308 | ||
276 | mhd_assert (pool->end >= pool->pos); | 309 | mhd_assert (pool->end >= pool->pos); |
277 | mhd_assert (pool->size >= pool->end - pool->pos); | 310 | mhd_assert (pool->size >= pool->end - pool->pos); |
278 | asize = ROUND_TO_ALIGN (size); | 311 | asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (size); |
279 | if ( (0 == asize) && (0 != size) ) | 312 | if ( (0 == asize) && (0 != size) ) |
280 | return NULL; /* size too close to SIZE_MAX */ | 313 | return NULL; /* size too close to SIZE_MAX */ |
281 | if ( (pool->pos + asize > pool->end) || | 314 | if ( (pool->pos + asize > pool->end) || |
@@ -291,6 +324,7 @@ MHD_pool_allocate (struct MemoryPool *pool, | |||
291 | ret = &pool->memory[pool->pos]; | 324 | ret = &pool->memory[pool->pos]; |
292 | pool->pos += asize; | 325 | pool->pos += asize; |
293 | } | 326 | } |
327 | _MHD_UNPOISON_MEMORY (ret, size); | ||
294 | return ret; | 328 | return ret; |
295 | } | 329 | } |
296 | 330 | ||
@@ -299,7 +333,7 @@ MHD_pool_allocate (struct MemoryPool *pool, | |||
299 | * Try to allocate @a size bytes memory area from the @a pool. | 333 | * Try to allocate @a size bytes memory area from the @a pool. |
300 | * | 334 | * |
301 | * If allocation fails, @a required_bytes is updated with size required to be | 335 | * If allocation fails, @a required_bytes is updated with size required to be |
302 | * freed in the @a pool from relocatable area to allocate requested number | 336 | * freed in the @a pool from rellocatable area to allocate requested number |
303 | * of bytes. | 337 | * of bytes. |
304 | * Allocated memory area is always not rellocatable ("from end"). | 338 | * Allocated memory area is always not rellocatable ("from end"). |
305 | * | 339 | * |
@@ -311,7 +345,7 @@ MHD_pool_allocate (struct MemoryPool *pool, | |||
311 | * Cannot be NULL. | 345 | * Cannot be NULL. |
312 | * @return the pointer to allocated memory area if succeed, | 346 | * @return the pointer to allocated memory area if succeed, |
313 | * NULL if the pool doesn't have enough space, required_bytes is updated | 347 | * NULL if the pool doesn't have enough space, required_bytes is updated |
314 | * with amount of space needed to be freed in relocatable area or | 348 | * with amount of space needed to be freed in rellocatable area or |
315 | * set to SIZE_MAX if requested size is too large for the pool. | 349 | * set to SIZE_MAX if requested size is too large for the pool. |
316 | */ | 350 | */ |
317 | void * | 351 | void * |
@@ -324,7 +358,7 @@ MHD_pool_try_alloc (struct MemoryPool *pool, | |||
324 | 358 | ||
325 | mhd_assert (pool->end >= pool->pos); | 359 | mhd_assert (pool->end >= pool->pos); |
326 | mhd_assert (pool->size >= pool->end - pool->pos); | 360 | mhd_assert (pool->size >= pool->end - pool->pos); |
327 | asize = ROUND_TO_ALIGN (size); | 361 | asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (size); |
328 | if ( (0 == asize) && (0 != size) ) | 362 | if ( (0 == asize) && (0 != size) ) |
329 | { /* size is too close to SIZE_MAX, very unlikely */ | 363 | { /* size is too close to SIZE_MAX, very unlikely */ |
330 | *required_bytes = SIZE_MAX; | 364 | *required_bytes = SIZE_MAX; |
@@ -333,6 +367,8 @@ MHD_pool_try_alloc (struct MemoryPool *pool, | |||
333 | if ( (pool->pos + asize > pool->end) || | 367 | if ( (pool->pos + asize > pool->end) || |
334 | (pool->pos + asize < pool->pos)) | 368 | (pool->pos + asize < pool->pos)) |
335 | { | 369 | { |
370 | mhd_assert ((pool->end - pool->pos) == \ | ||
371 | ROUND_TO_ALIGN (pool->end - pool->pos)); | ||
336 | if (asize <= pool->end) | 372 | if (asize <= pool->end) |
337 | *required_bytes = asize - (pool->end - pool->pos); | 373 | *required_bytes = asize - (pool->end - pool->pos); |
338 | else | 374 | else |
@@ -341,6 +377,7 @@ MHD_pool_try_alloc (struct MemoryPool *pool, | |||
341 | } | 377 | } |
342 | ret = &pool->memory[pool->end - asize]; | 378 | ret = &pool->memory[pool->end - asize]; |
343 | pool->end -= asize; | 379 | pool->end -= asize; |
380 | _MHD_UNPOISON_MEMORY (ret, size); | ||
344 | return ret; | 381 | return ret; |
345 | } | 382 | } |
346 | 383 | ||
@@ -362,7 +399,7 @@ MHD_pool_try_alloc (struct MemoryPool *pool, | |||
362 | * NULL if the pool cannot support @a new_size | 399 | * NULL if the pool cannot support @a new_size |
363 | * bytes (old continues to be valid for @a old_size) | 400 | * bytes (old continues to be valid for @a old_size) |
364 | */ | 401 | */ |
365 | void * | 402 | _MHD_NOSANITIZE_PTRS void * |
366 | MHD_pool_reallocate (struct MemoryPool *pool, | 403 | MHD_pool_reallocate (struct MemoryPool *pool, |
367 | void *old, | 404 | void *old, |
368 | size_t old_size, | 405 | size_t old_size, |
@@ -374,11 +411,11 @@ MHD_pool_reallocate (struct MemoryPool *pool, | |||
374 | mhd_assert (pool->end >= pool->pos); | 411 | mhd_assert (pool->end >= pool->pos); |
375 | mhd_assert (pool->size >= pool->end - pool->pos); | 412 | mhd_assert (pool->size >= pool->end - pool->pos); |
376 | mhd_assert (old != NULL || old_size == 0); | 413 | mhd_assert (old != NULL || old_size == 0); |
377 | mhd_assert (old == NULL || pool->memory <= (uint8_t*) old); | ||
378 | mhd_assert (pool->size >= old_size); | 414 | mhd_assert (pool->size >= old_size); |
415 | mhd_assert (old == NULL || pool->memory <= (uint8_t*) old); | ||
379 | /* (old == NULL || pool->memory + pool->size >= (uint8_t*) old + old_size) */ | 416 | /* (old == NULL || pool->memory + pool->size >= (uint8_t*) old + old_size) */ |
380 | mhd_assert (old == NULL || \ | 417 | mhd_assert (old == NULL || \ |
381 | (pool->size) >= \ | 418 | (pool->size - _MHD_RED_ZONE_SIZE) >= \ |
382 | (((size_t) (((uint8_t*) old) - pool->memory)) + old_size)); | 419 | (((size_t) (((uint8_t*) old) - pool->memory)) + old_size)); |
383 | /* Blocks "from the end" must not be reallocated */ | 420 | /* Blocks "from the end" must not be reallocated */ |
384 | /* (old == NULL || old_size == 0 || pool->memory + pool->pos > (uint8_t*) old) */ | 421 | /* (old == NULL || old_size == 0 || pool->memory + pool->pos > (uint8_t*) old) */ |
@@ -386,7 +423,7 @@ MHD_pool_reallocate (struct MemoryPool *pool, | |||
386 | pool->pos > (size_t) ((uint8_t*) old - pool->memory)); | 423 | pool->pos > (size_t) ((uint8_t*) old - pool->memory)); |
387 | mhd_assert (old == NULL || old_size == 0 || \ | 424 | mhd_assert (old == NULL || old_size == 0 || \ |
388 | (size_t) (((uint8_t*) old) - pool->memory) + old_size <= \ | 425 | (size_t) (((uint8_t*) old) - pool->memory) + old_size <= \ |
389 | pool->end); | 426 | pool->end - _MHD_RED_ZONE_SIZE); |
390 | 427 | ||
391 | if (0 != old_size) | 428 | if (0 != old_size) |
392 | { /* Have previously allocated data */ | 429 | { /* Have previously allocated data */ |
@@ -396,10 +433,13 @@ MHD_pool_reallocate (struct MemoryPool *pool, | |||
396 | if (shrinking) | 433 | if (shrinking) |
397 | { /* Shrinking in-place, zero-out freed part */ | 434 | { /* Shrinking in-place, zero-out freed part */ |
398 | memset ((uint8_t*) old + new_size, 0, old_size - new_size); | 435 | memset ((uint8_t*) old + new_size, 0, old_size - new_size); |
436 | _MHD_POISON_MEMORY ((uint8_t*) old + new_size, old_size - new_size); | ||
399 | } | 437 | } |
400 | if (pool->pos == ROUND_TO_ALIGN (old_offset + old_size)) | 438 | if (pool->pos == |
439 | ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + old_size)) | ||
401 | { /* "old" block is the last allocated block */ | 440 | { /* "old" block is the last allocated block */ |
402 | const size_t new_apos = ROUND_TO_ALIGN (old_offset + new_size); | 441 | const size_t new_apos = |
442 | ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + new_size); | ||
403 | if (! shrinking) | 443 | if (! shrinking) |
404 | { /* Grow in-place, check for enough space. */ | 444 | { /* Grow in-place, check for enough space. */ |
405 | if ( (new_apos > pool->end) || | 445 | if ( (new_apos > pool->end) || |
@@ -408,13 +448,14 @@ MHD_pool_reallocate (struct MemoryPool *pool, | |||
408 | } | 448 | } |
409 | /* Resized in-place */ | 449 | /* Resized in-place */ |
410 | pool->pos = new_apos; | 450 | pool->pos = new_apos; |
451 | _MHD_UNPOISON_MEMORY (old, new_size); | ||
411 | return old; | 452 | return old; |
412 | } | 453 | } |
413 | if (shrinking) | 454 | if (shrinking) |
414 | return old; /* Resized in-place, freed part remains allocated */ | 455 | return old; /* Resized in-place, freed part remains allocated */ |
415 | } | 456 | } |
416 | /* Need to allocate new block */ | 457 | /* Need to allocate new block */ |
417 | asize = ROUND_TO_ALIGN (new_size); | 458 | asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size); |
418 | if ( ( (0 == asize) && | 459 | if ( ( (0 == asize) && |
419 | (0 != new_size) ) || /* Value wrap, too large new_size. */ | 460 | (0 != new_size) ) || /* Value wrap, too large new_size. */ |
420 | (asize > pool->end - pool->pos) ) /* Not enough space */ | 461 | (asize > pool->end - pool->pos) ) /* Not enough space */ |
@@ -423,12 +464,14 @@ MHD_pool_reallocate (struct MemoryPool *pool, | |||
423 | new_blc = pool->memory + pool->pos; | 464 | new_blc = pool->memory + pool->pos; |
424 | pool->pos += asize; | 465 | pool->pos += asize; |
425 | 466 | ||
467 | _MHD_UNPOISON_MEMORY (new_blc, new_size); | ||
426 | if (0 != old_size) | 468 | if (0 != old_size) |
427 | { | 469 | { |
428 | /* Move data to new block, old block remains allocated */ | 470 | /* Move data to new block, old block remains allocated */ |
429 | memcpy (new_blc, old, old_size); | 471 | memcpy (new_blc, old, old_size); |
430 | /* Zero-out old block */ | 472 | /* Zero-out old block */ |
431 | memset (old, 0, old_size); | 473 | memset (old, 0, old_size); |
474 | _MHD_POISON_MEMORY (old, old_size); | ||
432 | } | 475 | } |
433 | return new_blc; | 476 | return new_blc; |
434 | } | 477 | } |
@@ -447,7 +490,7 @@ MHD_pool_reallocate (struct MemoryPool *pool, | |||
447 | * (should be larger or equal to @a copy_bytes) | 490 | * (should be larger or equal to @a copy_bytes) |
448 | * @return addr new address of @a keep (if it had to change) | 491 | * @return addr new address of @a keep (if it had to change) |
449 | */ | 492 | */ |
450 | void * | 493 | _MHD_NOSANITIZE_PTRS void * |
451 | MHD_pool_reset (struct MemoryPool *pool, | 494 | MHD_pool_reset (struct MemoryPool *pool, |
452 | void *keep, | 495 | void *keep, |
453 | size_t copy_bytes, | 496 | size_t copy_bytes, |
@@ -463,6 +506,7 @@ MHD_pool_reset (struct MemoryPool *pool, | |||
463 | mhd_assert (keep == NULL || \ | 506 | mhd_assert (keep == NULL || \ |
464 | pool->size >= \ | 507 | pool->size >= \ |
465 | ((size_t) ((uint8_t*) keep - pool->memory)) + copy_bytes); | 508 | ((size_t) ((uint8_t*) keep - pool->memory)) + copy_bytes); |
509 | _MHD_UNPOISON_MEMORY (pool->memory, new_size); | ||
466 | if ( (NULL != keep) && | 510 | if ( (NULL != keep) && |
467 | (keep != pool->memory) ) | 511 | (keep != pool->memory) ) |
468 | { | 512 | { |
@@ -477,6 +521,7 @@ MHD_pool_reset (struct MemoryPool *pool, | |||
477 | size_t to_zero; /** Size of area to zero-out */ | 521 | size_t to_zero; /** Size of area to zero-out */ |
478 | 522 | ||
479 | to_zero = pool->size - copy_bytes; | 523 | to_zero = pool->size - copy_bytes; |
524 | _MHD_UNPOISON_MEMORY (pool->memory + copy_bytes, to_zero); | ||
480 | #ifdef _WIN32 | 525 | #ifdef _WIN32 |
481 | if (pool->is_mmap) | 526 | if (pool->is_mmap) |
482 | { | 527 | { |
@@ -506,8 +551,10 @@ MHD_pool_reset (struct MemoryPool *pool, | |||
506 | 0, | 551 | 0, |
507 | to_zero); | 552 | to_zero); |
508 | } | 553 | } |
509 | pool->pos = ROUND_TO_ALIGN (new_size); | 554 | pool->pos = ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size); |
510 | pool->end = pool->size; | 555 | pool->end = pool->size; |
556 | _MHD_POISON_MEMORY (((uint8_t*) pool->memory) + new_size, \ | ||
557 | pool->size - new_size); | ||
511 | return pool->memory; | 558 | return pool->memory; |
512 | } | 559 | } |
513 | 560 | ||