Diffstat (limited to 'src/lib/memorypool.c')
 -rw-r--r--  src/lib/memorypool.c  340
 1 file changed, 340 insertions, 0 deletions
diff --git a/src/lib/memorypool.c b/src/lib/memorypool.c
new file mode 100644
index 00000000..bda45e1e
--- /dev/null
+++ b/src/lib/memorypool.c
@@ -0,0 +1,340 @@
/*
  This file is part of libmicrohttpd
  Copyright (C) 2007, 2009, 2010 Daniel Pittman and Christian Grothoff

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

/**
 * @file memorypool.c
 * @brief memory pool
 * @author Christian Grothoff
 */
#include "memorypool.h"

/* define MAP_ANONYMOUS for Mac OS X */
#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif
#ifndef MAP_FAILED
#define MAP_FAILED ((void*)-1)
#endif

/**
 * Align to 2x word size (as GNU libc does).
 */
#define ALIGN_SIZE (2 * sizeof(void*))

/**
 * Round up 'n' to a multiple of ALIGN_SIZE.
 */
#define ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) & (~(ALIGN_SIZE - 1)))
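
/*
 * Illustrative arithmetic (assuming an 8-byte pointer, so ALIGN_SIZE is 16):
 *   ROUND_TO_ALIGN (1)  -> 16
 *   ROUND_TO_ALIGN (16) -> 16
 *   ROUND_TO_ALIGN (17) -> 32
 * The bit mask works because ALIGN_SIZE is a power of two.
 */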


/**
 * Handle for a memory pool.  Pools are not reentrant and must not be
 * used by multiple threads.
 */
struct MemoryPool
{

  /**
   * Pointer to the pool's memory
   */
  char *memory;

  /**
   * Size of the pool.
   */
  size_t size;

  /**
   * Offset of the first unallocated byte.
   */
  size_t pos;

  /**
   * Offset of the last unallocated byte.
   */
  size_t end;

  /**
   * #MHD_NO if pool was malloc'ed, #MHD_YES if mmapped (VirtualAlloc'ed for W32).
   */
  int is_mmap;
};
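
/*
 * Illustrative layout note: allocations taken from the front advance
 * 'pos', allocations taken from the end lower 'end', and the bytes in
 * between (pos..end-1) stay free.  For example, in a 1024-byte pool
 * (an arbitrary size) a 64-byte front allocation and a 32-byte end
 * allocation leave pos == 64, end == 992, and 928 bytes free.
 */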


/**
 * Free the memory given by @a ptr.  Calls "free(ptr)".  This function
 * should be used to free the username returned by
 * #MHD_digest_auth_get_username().
 * @note Since v0.9.56
 *
 * @param ptr pointer to free.
 */
_MHD_EXTERN void
MHD_free (void *ptr)
{
  free (ptr);
}


/**
 * Create a memory pool.
 *
 * @param max maximum size of the pool
 * @return NULL on error
 */
struct MemoryPool *
MHD_pool_create (size_t max)
{
  struct MemoryPool *pool;

  pool = malloc (sizeof (struct MemoryPool));
  if (NULL == pool)
    return NULL;
#if defined(MAP_ANONYMOUS) || defined(_WIN32)
  if (max <= 32 * 1024)
    pool->memory = MAP_FAILED;
  else
#if defined(MAP_ANONYMOUS) && !defined(_WIN32)
    pool->memory = mmap (NULL,
                         max,
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS,
                         -1,
                         0);
#elif defined(_WIN32)
    pool->memory = VirtualAlloc (NULL,
                                 max,
                                 MEM_COMMIT | MEM_RESERVE,
                                 PAGE_READWRITE);
#endif
#else
  pool->memory = MAP_FAILED;
#endif
  if ( (MAP_FAILED == pool->memory) ||
       (NULL == pool->memory) )
    {
      pool->memory = malloc (max);
      if (NULL == pool->memory)
        {
          free (pool);
          return NULL;
        }
      pool->is_mmap = MHD_NO;
    }
  else
    {
      pool->is_mmap = MHD_YES;
    }
  pool->pos = 0;
  pool->end = max;
  pool->size = max;
  return pool;
}
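
/*
 * Usage sketch (the 64 KiB size is an arbitrary example value): the
 * code above serves pools of 32 KiB or less from malloc() and larger
 * ones from mmap()/VirtualAlloc(), falling back to malloc() if the
 * mapping fails; callers only see the opaque handle either way.
 *
 *   struct MemoryPool *pool = MHD_pool_create (64 * 1024);
 *   if (NULL == pool)
 *     return;                     // out of memory
 *   // ... allocate from the pool ...
 *   MHD_pool_destroy (pool);
 */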


/**
 * Destroy a memory pool.
 *
 * @param pool memory pool to destroy
 */
void
MHD_pool_destroy (struct MemoryPool *pool)
{
  if (NULL == pool)
    return;
  if (MHD_NO == pool->is_mmap)
    free (pool->memory);
  else
#if defined(MAP_ANONYMOUS) && !defined(_WIN32)
    munmap (pool->memory,
            pool->size);
#elif defined(_WIN32)
    VirtualFree (pool->memory,
                 0,
                 MEM_RELEASE);
#else
    abort ();
#endif
  free (pool);
}


/**
 * Check how much memory is left in the @a pool
 *
 * @param pool pool to check
 * @return number of bytes still available in @a pool
 */
size_t
MHD_pool_get_free (struct MemoryPool *pool)
{
  return (pool->end - pool->pos);
}
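
/*
 * Illustrative check (the 256-byte size is an arbitrary example value):
 * a caller can probe the remaining capacity before allocating.
 *
 *   if (MHD_pool_get_free (pool) >= 256)
 *     {
 *       void *buf = MHD_pool_allocate (pool, 256, MHD_NO);
 *       // ... use buf ...
 *     }
 */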


/**
 * Allocate @a size bytes from the pool.
 *
 * @param pool memory pool to use for the operation
 * @param size number of bytes to allocate
 * @param from_end allocate from end of pool (set to #MHD_YES);
 *        use this for small, persistent allocations that
 *        will never be reallocated
 * @return NULL if the pool cannot support @a size more
 *         bytes
 */
void *
MHD_pool_allocate (struct MemoryPool *pool,
                   size_t size,
                   int from_end)
{
  void *ret;
  size_t asize;

  asize = ROUND_TO_ALIGN (size);
  if ( (0 == asize) && (0 != size) )
    return NULL; /* size too close to SIZE_MAX */
  if ( (pool->pos + asize > pool->end) ||
       (pool->pos + asize < pool->pos) )
    return NULL;
  if (MHD_YES == from_end)
    {
      ret = &pool->memory[pool->end - asize];
      pool->end -= asize;
    }
  else
    {
      ret = &pool->memory[pool->pos];
      pool->pos += asize;
    }
  return ret;
}
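
/*
 * Illustrative sketch (sizes are arbitrary example values): a scratch
 * buffer that may later grow is taken from the front, while a small
 * long-lived allocation is taken from the end so it never blocks the
 * front region from growing.
 *
 *   char *scratch = MHD_pool_allocate (pool, 1024, MHD_NO);
 *   struct SomeHeader *hdr = MHD_pool_allocate (pool,
 *                                               sizeof (struct SomeHeader),
 *                                               MHD_YES);
 *
 * 'struct SomeHeader' is a placeholder type, not part of this file.
 */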


/**
 * Reallocate a block of memory obtained from the pool.
 * This is particularly efficient when growing or
 * shrinking the block that was last (re)allocated.
 * If the given block is not the most recently
 * (re)allocated block, the memory of the previous
 * allocation may be leaked until the pool is
 * destroyed (and copying the data may be required).
 *
 * @param pool memory pool to use for the operation
 * @param old the existing block
 * @param old_size the size of the existing block
 * @param new_size the new size of the block
 * @return new address of the block, or
 *         NULL if the pool cannot support @a new_size
 *         bytes (old continues to be valid for @a old_size)
 */
void *
MHD_pool_reallocate (struct MemoryPool *pool,
                     void *old,
                     size_t old_size,
                     size_t new_size)
{
  void *ret;
  size_t asize;

  asize = ROUND_TO_ALIGN (new_size);
  if ( (0 == asize) &&
       (0 != new_size) )
    return NULL; /* new_size too close to SIZE_MAX */
  if ( (pool->end < old_size) ||
       (pool->end < asize) )
    return NULL; /* unsatisfiable or bogus request */

  if ( (pool->pos >= old_size) &&
       (&pool->memory[pool->pos - old_size] == old) )
    {
      /* was the previous allocation - optimize! */
      if (pool->pos + asize - old_size <= pool->end)
        {
          /* fits */
          pool->pos += asize - old_size;
          if (asize < old_size) /* shrinking - zero again! */
            memset (&pool->memory[pool->pos],
                    0,
                    old_size - asize);
          return old;
        }
      /* does not fit */
      return NULL;
    }
  if (asize <= old_size)
    return old; /* cannot shrink, no need to move */
  if ( (pool->pos + asize >= pool->pos) &&
       (pool->pos + asize <= pool->end) )
    {
      /* fits */
      ret = &pool->memory[pool->pos];
      if (0 != old_size)
        memmove (ret,
                 old,
                 old_size);
      pool->pos += asize;
      return ret;
    }
  /* does not fit */
  return NULL;
}
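
/*
 * Illustrative sketch (sizes are arbitrary example values): growing the
 * block that was allocated last reuses the space in place, so only the
 * pool offsets move and no data is copied.
 *
 *   char *buf = MHD_pool_allocate (pool, 128, MHD_NO);
 *   char *bigger = MHD_pool_reallocate (pool, buf, 128, 256);
 *   if (NULL == bigger)
 *     ...;  // pool exhausted; 'buf' is still valid for 128 bytes
 */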


/**
 * Clear all entries from the memory pool except
 * for @a keep of the given @a copy_bytes.  The pointer
 * returned should be a buffer of @a new_size where
 * the first @a copy_bytes are from @a keep.
 *
 * @param pool memory pool to use for the operation
 * @param keep pointer to the entry to keep (may be NULL)
 * @param copy_bytes how many bytes need to be kept at this address
 * @param new_size how many bytes should the allocation we return have?
 *        (should be larger or equal to @a copy_bytes)
 * @return addr new address of @a keep (if it had to change)
 */
void *
MHD_pool_reset (struct MemoryPool *pool,
                void *keep,
                size_t copy_bytes,
                size_t new_size)
{
  if ( (NULL != keep) &&
       (keep != pool->memory) )
    {
      if (0 != copy_bytes)
        memmove (pool->memory,
                 keep,
                 copy_bytes);
      keep = pool->memory;
    }
  pool->end = pool->size;
  /* technically not needed, but safer to zero out */
  if (pool->size > copy_bytes)
    memset (&pool->memory[copy_bytes],
            0,
            pool->size - copy_bytes);
  if (NULL != keep)
    pool->pos = ROUND_TO_ALIGN (new_size);
  return keep;
}
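
/*
 * Illustrative sketch (byte counts are arbitrary example values): between
 * requests the pool is wiped, keeping only a read buffer whose first
 * bytes are still needed.  'rbuf' is a placeholder variable.
 *
 *   rbuf = MHD_pool_reset (pool,
 *                          rbuf,      // data to preserve
 *                          100,       // bytes actually in use
 *                          1024);     // size the kept buffer should have
 *   // everything else previously allocated from the pool is now invalid
 */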


/* end of memorypool.c */