/**
 * @file
 * Dynamic memory manager
 *
 * This is a lightweight replacement for the standard C library malloc().
 *
 * If you want to use the standard C library malloc() instead, define
 * MEM_LIBC_MALLOC to 1 in your lwipopts.h
 *
 * To let mem_malloc() use pools (prevents fragmentation and is much faster than
 * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
 * MEMP_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
 * of pools like this (more pools can be added between _START and _END):
 *
 * Define three pools with sizes 256, 512, and 1512 bytes
 * LWIP_MALLOC_MEMPOOL_START
 * LWIP_MALLOC_MEMPOOL(20, 256)
 * LWIP_MALLOC_MEMPOOL(10, 512)
 * LWIP_MALLOC_MEMPOOL(5, 1512)
 * LWIP_MALLOC_MEMPOOL_END
 */
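
/* Illustrative lwipopts.h settings for the pool-backed variant described above
 * (a sketch for orientation, not taken from this repository's configuration):
 *
 *   #define MEM_USE_POOLS          1
 *   #define MEMP_USE_CUSTOM_POOLS  1
 *
 * or, to fall back to the C library allocator instead:
 *
 *   #define MEM_LIBC_MALLOC        1
 */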

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 *         Simon Goldschmidt
 *
 */

#include "lwip/opt.h"
#include "lwip/mem.h"
#include "lwip/def.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
#include "lwip/err.h"

#include <string.h>

#if MEM_LIBC_MALLOC
#include <stdlib.h> /* for malloc()/free() */
#endif

/* This is overridable for tests only... */
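/* By default an illegal free (bad alignment, bad pointer, double free) fails an
 * assertion with the given message; tests can override this to keep running. */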
#ifndef LWIP_MEM_ILLEGAL_FREE
#define LWIP_MEM_ILLEGAL_FREE(msg) LWIP_ASSERT(msg, 0)
#endif

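/* Statistics macros wrapped in SYS_ARCH_LOCKED() so the counters are protected
 * from concurrent access (see the 'protect mem stats' comments below). */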
#define MEM_STATS_INC_LOCKED(x) SYS_ARCH_LOCKED(MEM_STATS_INC(x))
#define MEM_STATS_INC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_INC_USED(x, y))
#define MEM_STATS_DEC_USED_LOCKED(x, y) SYS_ARCH_LOCKED(MEM_STATS_DEC_USED(x, y))

#if MEM_LIBC_MALLOC || MEM_USE_POOLS

/** mem_init is not used when using pools instead of a heap or using
 * C library malloc().
 */
void
mem_init(void)
{
}

/** mem_trim is not used when using pools instead of a heap or using
 * C library malloc(): we can't free part of a pool element and the stack
 * does not support mem_trim() returning a different pointer.
 */
void *
mem_trim(void *mem, mem_size_t size)
{
  LWIP_UNUSED_ARG(size);
  return mem;
}
#endif /* MEM_LIBC_MALLOC || MEM_USE_POOLS */

#if MEM_LIBC_MALLOC
/* lwIP heap implemented using C library malloc() */

/* in case C library malloc() needs extra protection,
 * allow these defines to be overridden.
 */
#ifndef mem_clib_free
#define mem_clib_free free
#endif
#ifndef mem_clib_malloc
#define mem_clib_malloc malloc
#endif
#ifndef mem_clib_calloc
#define mem_clib_calloc calloc
#endif

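/* When memory statistics are enabled, every block handed out by the libc-backed
 * mem_malloc() is prefixed with an aligned mem_size_t holding the requested size,
 * so mem_free() can decrement the 'used' counter by the right amount. */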
#if LWIP_STATS && MEM_STATS
#define MEM_LIBC_STATSHELPER_SIZE LWIP_MEM_ALIGN_SIZE(sizeof(mem_size_t))
#else
#define MEM_LIBC_STATSHELPER_SIZE 0
#endif

/**
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value must always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size)
{
  void *ret = mem_clib_malloc(size + MEM_LIBC_STATSHELPER_SIZE);
  if (ret == NULL) {
    MEM_STATS_INC_LOCKED(err);
  } else {
    LWIP_ASSERT("malloc() must return aligned memory", LWIP_MEM_ALIGN(ret) == ret);
#if LWIP_STATS && MEM_STATS
    *(mem_size_t *)ret = size;
    ret = (u8_t *)ret + MEM_LIBC_STATSHELPER_SIZE;
    MEM_STATS_INC_USED_LOCKED(used, size);
#endif
  }
  return ret;
}

/** Put memory back on the heap
 *
 * @param rmem is the pointer as returned by a previous call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
#if LWIP_STATS && MEM_STATS
  rmem = (u8_t *)rmem - MEM_LIBC_STATSHELPER_SIZE;
  MEM_STATS_DEC_USED_LOCKED(used, *(mem_size_t *)rmem);
#endif
  mem_clib_free(rmem);
}

#elif MEM_USE_POOLS

/* lwIP heap implemented with different sized pools */

/**
 * Allocate memory: determine the smallest pool that is big enough
 * to contain an element of 'size' and get an element from that pool.
 *
 * @param size the size in bytes of the memory needed
 * @return a pointer to the allocated memory or NULL if the pool is empty
 */
void *
mem_malloc(mem_size_t size)
{
  void *ret;
  struct memp_malloc_helper *element = NULL;
  memp_t poolnr;
  mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));

  for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) {
    /* is this pool big enough to hold an element of the required size
       plus a struct memp_malloc_helper that saves the pool this element came from? */
    if (required_size <= memp_pools[poolnr]->size) {
      element = (struct memp_malloc_helper *)memp_malloc(poolnr);
      if (element == NULL) {
        /* No need to DEBUGF or ASSERT: This error is already taken care of in memp.c */
#if MEM_USE_POOLS_TRY_BIGGER_POOL
        /* Try a bigger pool if this one is empty! */
        if (poolnr < MEMP_POOL_LAST) {
          continue;
        }
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
        MEM_STATS_INC_LOCKED(err);
        return NULL;
      }
      break;
    }
  }
  if (poolnr > MEMP_POOL_LAST) {
    LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
    MEM_STATS_INC_LOCKED(err);
    return NULL;
  }

  /* save the pool number this element came from */
  element->poolnr = poolnr;
  /* and return a pointer to the memory directly after the struct memp_malloc_helper */
  ret = (u8_t *)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));

#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS)
  /* truncating to u16_t is safe because struct memp_desc::size is u16_t */
  element->size = (u16_t)size;
  MEM_STATS_INC_USED_LOCKED(used, element->size);
#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */
#if MEMP_OVERFLOW_CHECK
  /* initialize unused memory (diff between requested size and selected pool's size) */
  memset((u8_t *)ret + size, 0xcd, memp_pools[poolnr]->size - size);
#endif /* MEMP_OVERFLOW_CHECK */
  return ret;
}

/**
 * Free memory previously allocated by mem_malloc. Loads the pool number
 * and calls memp_free with that pool number to put the element back into
 * its pool
 *
 * @param rmem the memory element to free
 */
void
mem_free(void *rmem)
{
  struct memp_malloc_helper *hmem;

  LWIP_ASSERT("rmem != NULL", (rmem != NULL));
  LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));

  /* get the original struct memp_malloc_helper */
  /* cast through void* to get rid of alignment warnings */
  hmem = (struct memp_malloc_helper *)(void *)((u8_t *)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)));

  LWIP_ASSERT("hmem != NULL", (hmem != NULL));
  LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
  LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));

  MEM_STATS_DEC_USED_LOCKED(used, hmem->size);
#if MEMP_OVERFLOW_CHECK
  {
    u16_t i;
    LWIP_ASSERT("MEM_USE_POOLS: invalid chunk size",
                hmem->size <= memp_pools[hmem->poolnr]->size);
    /* check that unused memory remained untouched (diff between requested size and selected pool's size) */
    for (i = hmem->size; i < memp_pools[hmem->poolnr]->size; i++) {
      u8_t data = *((u8_t *)rmem + i);
      LWIP_ASSERT("MEM_USE_POOLS: mem overflow detected", data == 0xcd);
    }
  }
#endif /* MEMP_OVERFLOW_CHECK */

  /* and put it in the pool we saved earlier */
  memp_free(hmem->poolnr, hmem);
}

#else /* MEM_USE_POOLS */
/* lwIP replacement for your libc malloc() */

/**
 * The heap is made up as a list of structs of this type.
 * This does not have to be aligned since for getting its size,
 * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
 */
struct mem {
  /** index (-> ram[next]) of the next struct */
  mem_size_t next;
  /** index (-> ram[prev]) of the previous struct */
  mem_size_t prev;
  /** 1: this area is used; 0: this area is unused */
  u8_t used;
};

/** All allocated blocks will be MIN_SIZE bytes big, at least!
 * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
 * larger values could prevent very small blocks from fragmenting the RAM too much. */
#ifndef MIN_SIZE
#define MIN_SIZE 12
#endif /* MIN_SIZE */
/* some alignment macros: we define them here for better source code layout */
#define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
#define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
#define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE)

/** If you want to relocate the heap to external memory, simply define
 * LWIP_RAM_HEAP_POINTER as a void-pointer to that location.
 * If so, make sure the memory at that location is big enough (see below on
 * how that space is calculated). */
#ifndef LWIP_RAM_HEAP_POINTER
/** the heap. we need one struct mem at the end and some room for alignment */
LWIP_DECLARE_MEMORY_ALIGNED(ram_heap, MEM_SIZE_ALIGNED + (2U * SIZEOF_STRUCT_MEM));
#define LWIP_RAM_HEAP_POINTER ram_heap
#endif /* LWIP_RAM_HEAP_POINTER */

/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
static u8_t *ram;
/** the last entry, always unused! */
static struct mem *ram_end;
/** pointer to the lowest free block, this is used for faster search */
static struct mem *lfree;

/** concurrent access protection */
#if !NO_SYS
static sys_mutex_t mem_mutex;
#endif

#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT

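/* Set by mem_free()/mem_trim() and polled by mem_malloc(): when a free runs from
 * another context (e.g. an interrupt) while mem_malloc() is scanning the heap,
 * mem_malloc() notices the flag and restarts its search. */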
static volatile u8_t mem_free_count;

/* Allow mem_free from other (e.g. interrupt) context */
#define LWIP_MEM_FREE_DECL_PROTECT()  SYS_ARCH_DECL_PROTECT(lev_free)
#define LWIP_MEM_FREE_PROTECT()       SYS_ARCH_PROTECT(lev_free)
#define LWIP_MEM_FREE_UNPROTECT()     SYS_ARCH_UNPROTECT(lev_free)
#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_PROTECT()      SYS_ARCH_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_UNPROTECT()    SYS_ARCH_UNPROTECT(lev_alloc)

#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

/* Protect the heap only by using a mutex */
#define LWIP_MEM_FREE_DECL_PROTECT()
#define LWIP_MEM_FREE_PROTECT()    sys_mutex_lock(&mem_mutex)
#define LWIP_MEM_FREE_UNPROTECT()  sys_mutex_unlock(&mem_mutex)
/* mem_malloc is protected using mutex AND LWIP_MEM_ALLOC_PROTECT */
#define LWIP_MEM_ALLOC_DECL_PROTECT()
#define LWIP_MEM_ALLOC_PROTECT()
#define LWIP_MEM_ALLOC_UNPROTECT()

#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */


/**
 * "Plug holes" by combining adjacent empty struct mems.
 * After this function is through, there should not exist
 * one empty struct mem pointing to another empty struct mem.
 *
 * @param mem this points to a struct mem which just has been freed
 * @internal this function is only called by mem_free() and mem_trim()
 *
 * This assumes access to the heap is protected by the calling function
 * already.
 */
static void
plug_holes(struct mem *mem)
{
  struct mem *nmem;
  struct mem *pmem;

  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);

  /* plug hole forward */
  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);

  nmem = (struct mem *)(void *)&ram[mem->next];
  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
    /* if mem->next is unused and not end of ram, combine mem and mem->next */
    if (lfree == nmem) {
      lfree = mem;
    }
    mem->next = nmem->next;
    ((struct mem *)(void *)&ram[nmem->next])->prev = (mem_size_t)((u8_t *)mem - ram);
  }

  /* plug hole backward */
  pmem = (struct mem *)(void *)&ram[mem->prev];
  if (pmem != mem && pmem->used == 0) {
    /* if mem->prev is unused, combine mem and mem->prev */
    if (lfree == mem) {
      lfree = pmem;
    }
    pmem->next = mem->next;
    ((struct mem *)(void *)&ram[mem->next])->prev = (mem_size_t)((u8_t *)pmem - ram);
  }
}

/**
 * Zero the heap and initialize start, end and lowest-free
 */
void
mem_init(void)
{
  struct mem *mem;

  LWIP_ASSERT("Sanity check alignment",
              (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT - 1)) == 0);

  /* align the heap */
  ram = (u8_t *)LWIP_MEM_ALIGN(LWIP_RAM_HEAP_POINTER);
  /* initialize the start of the heap */
  mem = (struct mem *)(void *)ram;
  mem->next = MEM_SIZE_ALIGNED;
  mem->prev = 0;
  mem->used = 0;
  /* initialize the end of the heap */
  ram_end = (struct mem *)(void *)&ram[MEM_SIZE_ALIGNED];
  ram_end->used = 1;
  ram_end->next = MEM_SIZE_ALIGNED;
  ram_end->prev = MEM_SIZE_ALIGNED;

  /* initialize the lowest-free pointer to the start of the heap */
  lfree = (struct mem *)(void *)ram;

  MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);

  if (sys_mutex_new(&mem_mutex) != ERR_OK) {
    LWIP_ASSERT("failed to create mem_mutex", 0);
  }
}

/* Check if a struct mem is correctly linked.
 * If not, double-free is a possible reason.
 * A block is only considered valid if its next/prev indices stay inside the heap
 * and the neighbouring blocks link back to it.
 */
static int
mem_link_valid(struct mem *mem)
{
  struct mem *nmem, *pmem;
  mem_size_t rmem_idx;
  rmem_idx = (mem_size_t)((u8_t *)mem - ram);
  nmem = (struct mem *)(void *)&ram[mem->next];
  pmem = (struct mem *)(void *)&ram[mem->prev];
  if ((mem->next > MEM_SIZE_ALIGNED) || (mem->prev > MEM_SIZE_ALIGNED) ||
      ((mem->prev != rmem_idx) && (pmem->next != rmem_idx)) ||
      ((nmem != ram_end) && (nmem->prev != rmem_idx))) {
    return 0;
  }
  return 1;
}

/**
 * Put a struct mem back on the heap
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 *             call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  struct mem *mem;
  LWIP_MEM_FREE_DECL_PROTECT();

  if (rmem == NULL) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  if ((((mem_ptr_t)rmem) & (MEM_ALIGNMENT - 1)) != 0) {
    LWIP_MEM_ILLEGAL_FREE("mem_free: sanity check alignment");
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: sanity check alignment\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }

  /* Get the corresponding struct mem: */
  /* cast through void* to get rid of alignment warnings */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);

  if ((u8_t *)mem < ram || (u8_t *)rmem + MIN_SIZE_ALIGNED > (u8_t *)ram_end) {
    LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory");
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }
  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();
  /* mem has to be in a used state */
  if (!mem->used) {
    LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: double free");
    LWIP_MEM_FREE_UNPROTECT();
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: double free?\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }

  if (!mem_link_valid(mem)) {
    LWIP_MEM_ILLEGAL_FREE("mem_free: illegal memory: non-linked: double free");
    LWIP_MEM_FREE_UNPROTECT();
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory: non-linked: double free?\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return;
  }

  /* mem is now unused. */
  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }

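  /* mem->next holds the offset of the following block, so next minus this block's
   * own offset is the full size being released (struct mem header included). */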
  MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));

  /* finally, see if prev or next are free also */
  plug_holes(mem);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
}

/**
 * Shrink memory returned by mem_malloc().
 *
 * @param rmem pointer to memory allocated by mem_malloc() that is to be shrunk
 * @param new_size required size after shrinking (needs to be smaller than or
 *                 equal to the previous size)
 * @return for compatibility reasons: is always == rmem, at the moment
 *         or NULL if new_size is > old size, in which case rmem is NOT touched
 *         or freed!
 */
void *
mem_trim(void *rmem, mem_size_t new_size)
{
  mem_size_t size, newsize;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
  /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
  LWIP_MEM_FREE_DECL_PROTECT();

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  newsize = (mem_size_t)LWIP_MEM_ALIGN_SIZE(new_size);
  if ((newsize > MEM_SIZE_ALIGNED) || (newsize < new_size)) {
    return NULL;
  }

  if (newsize < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    newsize = MIN_SIZE_ALIGNED;
  }

  LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
              (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
    /* protect mem stats from concurrent access */
    MEM_STATS_INC_LOCKED(illegal);
    return rmem;
  }
  /* Get the corresponding struct mem ... */
  /* cast through void* to get rid of alignment warnings */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... and its offset pointer */
  ptr = (mem_size_t)((u8_t *)mem - ram);

  size = (mem_size_t)((mem_size_t)(mem->next - ptr) - SIZEOF_STRUCT_MEM);
  LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
  if (newsize > size) {
    /* not supported */
    return NULL;
  }
  if (newsize == size) {
    /* No change in size, simply return */
    return rmem;
  }

  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();

  mem2 = (struct mem *)(void *)&ram[mem->next];
  if (mem2->used == 0) {
    /* The next struct is unused, we can simply move it a little */
    mem_size_t next;
    /* remember the old next pointer */
    next = mem2->next;
    /* create new struct mem which is moved directly after the shrunk mem */
    ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize);
    if (lfree == mem2) {
      lfree = (struct mem *)(void *)&ram[ptr2];
    }
    mem2 = (struct mem *)(void *)&ram[ptr2];
    mem2->used = 0;
    /* restore the next pointer */
    mem2->next = next;
    /* link it back to mem */
    mem2->prev = ptr;
    /* link mem to it */
    mem->next = ptr2;
    /* last thing to restore linked list: as we have moved mem2,
     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
     * the end of the heap */
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* no need to plug holes, we've already done that */
  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
    /* Next struct is used but there's room for another struct mem with
     * at least MIN_SIZE_ALIGNED of data.
     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
     *       region that couldn't hold data, but when mem->next gets freed,
     *       the 2 regions would be combined, resulting in more free memory */
    ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + newsize);
    mem2 = (struct mem *)(void *)&ram[ptr2];
    if (mem2 < lfree) {
      lfree = mem2;
    }
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* the original mem->next is used, so no need to plug holes! */
  }
  /* else {
     next struct mem is used but size between mem and mem2 is not big enough
     to create another struct mem
     -> don't do anything.
     -> the remaining space stays unused since it is too small
  } */
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
  return rmem;
}

/**
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size_in is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size_in)
{
  mem_size_t ptr, ptr2, size;
  struct mem *mem, *mem2;
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  u8_t local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_ALLOC_DECL_PROTECT();

  if (size_in == 0) {
    return NULL;
  }

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  size = (mem_size_t)LWIP_MEM_ALIGN_SIZE(size_in);
  if ((size > MEM_SIZE_ALIGNED) ||
      (size < size_in)) {
    return NULL;
  }

  if (size < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    size = MIN_SIZE_ALIGNED;
  }

  /* protect the heap from concurrent access */
  sys_mutex_lock(&mem_mutex);
  LWIP_MEM_ALLOC_PROTECT();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* run as long as a mem_free disturbed mem_malloc or mem_trim */
  do {
    local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

    /* Scan through the heap searching for a free block that is big enough,
     * beginning with the lowest free block.
     */
    for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size;
         ptr = ((struct mem *)(void *)&ram[ptr])->next) {
      mem = (struct mem *)(void *)&ram[ptr];
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
      mem_free_count = 0;
      LWIP_MEM_ALLOC_UNPROTECT();
      /* allow mem_free or mem_trim to run */
      LWIP_MEM_ALLOC_PROTECT();
      if (mem_free_count != 0) {
        /* If mem_free or mem_trim have run, we have to restart since they
           could have altered our current struct mem. */
        local_mem_free_count = 1;
        break;
      }
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

      if ((!mem->used) &&
          (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
        /* mem is not used and at least a perfect fit is possible:
         * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

        if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
          /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
           * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
           * -> split large block, create empty remainder,
           * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
           * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
           * struct mem would fit in but no data between mem2 and mem2->next
           * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
           *       region that couldn't hold data, but when mem->next gets freed,
           *       the 2 regions would be combined, resulting in more free memory
           */
          ptr2 = (mem_size_t)(ptr + SIZEOF_STRUCT_MEM + size);
          /* create mem2 struct */
          mem2 = (struct mem *)(void *)&ram[ptr2];
          mem2->used = 0;
          mem2->next = mem->next;
          mem2->prev = ptr;
          /* and insert it between mem and mem->next */
          mem->next = ptr2;
          mem->used = 1;

          if (mem2->next != MEM_SIZE_ALIGNED) {
            ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
          }
          MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
        } else {
          /* (a mem2 struct does not fit into the user data space of mem, and mem->next will always
           * be used at this point: if not, we would have 2 unused structs in a row and plug_holes
           * should have taken care of this).
           * -> near fit or exact fit: do not split, no mem2 creation
           * also can't move mem->next directly behind mem, since mem->next
           * will always be used at this point!
           */
          mem->used = 1;
          MEM_STATS_INC_USED(used, mem->next - (mem_size_t)((u8_t *)mem - ram));
        }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mem_malloc_adjust_lfree:
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
        if (mem == lfree) {
          struct mem *cur = lfree;
          /* Find next free block after mem and update lowest free pointer */
          while (cur->used && cur != ram_end) {
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
            mem_free_count = 0;
            LWIP_MEM_ALLOC_UNPROTECT();
            /* prevent high interrupt latency... */
            LWIP_MEM_ALLOC_PROTECT();
            if (mem_free_count != 0) {
              /* If mem_free or mem_trim have run, we have to restart since they
                 could have altered our current struct mem or lfree. */
              goto mem_malloc_adjust_lfree;
            }
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
            cur = (struct mem *)(void *)&ram[cur->next];
          }
          lfree = cur;
          LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
        }
        LWIP_MEM_ALLOC_UNPROTECT();
        sys_mutex_unlock(&mem_mutex);
        LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
                    (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
        LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
                    ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
        LWIP_ASSERT("mem_malloc: sanity check alignment",
                    (((mem_ptr_t)mem) & (MEM_ALIGNMENT - 1)) == 0);

        return (u8_t *)mem + SIZEOF_STRUCT_MEM;
      }
    }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
    /* if we got interrupted by a mem_free, try again */
  } while (local_mem_free_count != 0);
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  MEM_STATS_INC(err);
  LWIP_MEM_ALLOC_UNPROTECT();
  sys_mutex_unlock(&mem_mutex);
  LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
  return NULL;
}

#endif /* MEM_USE_POOLS */

#if MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS)
void *
mem_calloc(mem_size_t count, mem_size_t size)
{
  return mem_clib_calloc(count, size);
}

#else /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */
/**
 * Contiguously allocates enough space for count objects that are size bytes
 * of memory each and returns a pointer to the allocated memory.
 *
 * The allocated memory is filled with bytes of value zero.
 *
 * @param count number of objects to allocate
 * @param size size of the objects to allocate
 * @return pointer to allocated memory / NULL pointer if there is an error
 */
void *
mem_calloc(mem_size_t count, mem_size_t size)
{
  void *p;
  size_t alloc_size = (size_t)count * (size_t)size;

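  /* Overflow check: the product of count and size is computed in size_t and must
   * still fit into mem_size_t, otherwise the request is rejected. */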
  if ((size_t)(mem_size_t)alloc_size != alloc_size) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_calloc: could not allocate %"SZT_F" bytes\n", alloc_size));
    return NULL;
  }

  /* allocate 'count' objects of size 'size' */
  p = mem_malloc((mem_size_t)alloc_size);
  if (p) {
    /* zero the memory */
    memset(p, 0, alloc_size);
  }
  return p;
}
#endif /* MEM_LIBC_MALLOC && (!LWIP_STATS || !MEM_STATS) */