Embedded SDK
Embedded SDK
Loading...
Searching...
No Matches
mem_alloc.c
Go to the documentation of this file.
1
6/*********************
7 * INCLUDES
8 *********************/
9#include <stdio.h>
10#include <string.h>
11#include "exceptions.h"
12#include "errors.h"
13#include "os_helpers.h"
14#include "os_math.h"
15#include "os_print.h"
16#include "os_utils.h"
17#include "mem_alloc.h"
18
/*********************
 * DEFINES
 *********************/
// Alignment for payload data size
#define PAYLOAD_ALIGNEMENT 4
// Alignment for returned payload
#define PAYLOAD_DATA_ALIGNEMENT 8
// sizeof of header for an allocated chunk
#define ALLOC_CHUNK_HEADER_SIZE 4
// sizeof of header for a free chunk
#define FREE_CHUNK_HEADER_SIZE 8
// maximum block size storable in the 15-bit header size field
#define MAX_BLOCK_SIZE 0x7FFF
// size of heap header (sizeof(heap_t) rounded up to the data alignment)
#define HEAP_HEADER_SIZE \
    ((sizeof(heap_t) + PAYLOAD_DATA_ALIGNEMENT - 1) & ~(PAYLOAD_DATA_ALIGNEMENT - 1))

// number of 2^N segments aggregated as linear
#define NB_LINEAR_SEGMENTS 5
// 10 segments (the first one is linear) enable using up to ~32k bytes chunks, which is more than
// enough
/* Segregated free list bucket sizes:
- 0: 1-63 bytes
- 1: 64-127 bytes
- 2: 128-255 bytes
- 3: 256-511 bytes
- 4: 512-1023 bytes
- 5: 1024-2047 bytes
- 6: 2048-4095 bytes
- 7: 4096-8191 bytes
- 8: 8192-16383 bytes
- 9: 16384-32767 bytes
- n: 2^(n+5)-(2^(n+6)-1) bytes
*/
#define NB_MAX_SEGMENTS 10
// 4 sub-segments to have a better segregation of each segments
#define NB_SUB_SEGMENTS 4

// index is basically the offset in u64 from beginning of full heap buffer
#define GET_PTR(_heap, index) ((header_t *) (((uint8_t *) _heap) + ((index) << 3)))
#define GET_IDX(_heap, _ptr)  ((((uint8_t *) (_ptr)) - ((uint8_t *) _heap)) >> 3)
#define GET_PREV(_heap, _ptr) ((header_t *) (((uint8_t *) _heap) + ((_ptr)->fprev << 3)))
#define GET_NEXT(_heap, _ptr) ((header_t *) (((uint8_t *) _heap) + ((_ptr)->fnext << 3)))

// segment index for a size: floor(log2(size)), clamped to the aggregated linear range.
// BUGFIX: the macro body previously expanded "__builtin_clz(size)", silently
// capturing the caller's local variable named "size" instead of the macro
// argument; it now uses the (_size) parameter.
#define GET_SEGMENT(_size) MAX(NB_LINEAR_SEGMENTS, (31 - __builtin_clz(_size)))
61
62/**********************
63 * TYPEDEFS
64 **********************/
// Per-chunk header. Free chunks use all four fields (8 bytes);
// allocated chunks only use the first 4 bytes (size/allocated/phys_prev),
// the payload starting right after them.
typedef struct header_s {
    uint16_t size : 15;  // chunk size in bytes; the biggest size is 0x7FFF = 32767
    uint16_t allocated : 1;  // 1 if the chunk is allocated, 0 if free

    uint16_t phys_prev;  // index of the physically previous chunk (0 = none)

    uint16_t fprev;  // index of the previous free chunk in the same segment (free chunks only)
    uint16_t fnext;  // index of the next free chunk in the same segment (free chunks only)
} header_t;

// Allocator bookkeeping, stored at the very beginning of the heap buffer.
typedef struct {
    // first address past the heap buffer, used for bounds/consistency checks
    void *end;
    // actual number of used segments (at most NB_MAX_SEGMENTS)
    size_t nb_segs;
    // heads of the segregated free lists, as chunk indices (0 = empty list)
    uint16_t free_segments[NB_MAX_SEGMENTS * NB_SUB_SEGMENTS];
} heap_t;
80
81/**********************
82 * STATIC VARIABLES
83 **********************/
84
85/**********************
86 * STATIC PROTOTYPES
87 **********************/
88
89/**********************
90 * LOCAL FUNCTIONS
91 **********************/
92static inline size_t align_alloc_size(size_t size)
93{
94 // Add alignment - 1 to prepare for rounding up if needed
95 // This trick ensures already aligned sizes are not changed
96 size += PAYLOAD_ALIGNEMENT - 1;
97 // Mask off lower bits to align size down to nearest alignment boundary
98 size &= ~(PAYLOAD_ALIGNEMENT - 1);
99 // Either add 4 bytes (if size is 8N+4) or 8 bytes (if size is 8N) so the full
100 // 8-byte header structure fits while preserving alignment.
103 return size;
104}
105
106// This function returns the index into the array segregated explicit free lists
107// corresponding to the given size.
108// For 2^n <= size < 2^(n+1), this function returns n.
109static inline int seglist_index(heap_t *heap, size_t size)
110{
111 size_t seg;
112 uint8_t sub_segment;
113
114 seg = GET_SEGMENT(size);
115 if (seg == NB_LINEAR_SEGMENTS) {
116 // for the aggregated segment, the sub-segmant size is 1/NB_SUB_SEGMENTS of the size of the
117 // segment
118 sub_segment = (size >> (seg - 1)) & 0x3;
119 }
120 else {
121 // sub segment size is 1/(NB_SUB_SEGMENTS*2) of the size of the segment
122 sub_segment = (size >> (seg - 2)) & 0x3;
123 }
124 // from size in [0 : 63[, segment is 0 to 5 but is forced to 0
125 // from size in [2^6 : 2^13[, segment is [6 : 12] but is forced to [1 : 7]
126 seg -= NB_LINEAR_SEGMENTS;
127 // check consistency
128 if (seg >= heap->nb_segs) {
129 return -1;
130 }
131 return seg * NB_SUB_SEGMENTS + sub_segment;
132}
133
// This function returns the index into the array of segregated explicit free lists
// corresponding to the given size, being sure to be big enough (so rounded to next sub-segment).
// Used on allocation: any chunk found in the returned bucket is guaranteed >= size.
// Returns -1 when the size exceeds what this heap manages.
static inline int seglist_index_up(heap_t *heap, size_t size)
{
    size_t seg;
    uint8_t sub_segment;
    size_t sub_segment_size;

    // first pass: find which segment the raw size falls in, to know the
    // sub-segment granularity at that scale
    seg = GET_SEGMENT(size);
    if (seg == NB_LINEAR_SEGMENTS) {
        // for the aggregated segment, the sub-segment size is 1/NB_SUB_SEGMENTS of the size of the
        // segment
        sub_segment_size = (1 << (seg - 1));
    }
    else {
        // sub segment size is 1/(NB_SUB_SEGMENTS*2) of the size of the segment
        sub_segment_size = (1 << (seg - 2));
    }

    // round size up to the next sub-segment boundary
    size += sub_segment_size - 1;
    size &= ~(sub_segment_size - 1);

    // second pass: rounding up may have pushed size into the next segment,
    // so recompute segment and sub-segment on the rounded value
    seg = GET_SEGMENT(size);
    if (seg == NB_LINEAR_SEGMENTS) {
        // for the aggregated segment, the sub-segment size is 1/NB_SUB_SEGMENTS of the size of the
        // segment
        sub_segment = (size >> (seg - 1)) & 0x3;
    }
    else {
        // sub segment size is 1/(NB_SUB_SEGMENTS*2) of the size of the segment
        sub_segment = (size >> (seg - 2)) & 0x3;
    }

    // from size in [0 : 63], segment is 0 to 5 but is forced to 0
    // from size in [2^6 : 2^13[, segment is [6 : 12] but is forced to [1 : 7]
    seg -= NB_LINEAR_SEGMENTS;

    // check consistency: reject sizes beyond the heap's segment range
    if (seg >= heap->nb_segs) {
        return -1;
    }
    return seg * NB_SUB_SEGMENTS + sub_segment;
}
178
179// remove item from linked list
180static inline void list_remove(heap_t *heap, uint16_t *first_free, uint16_t elem)
181{
182 header_t *elem_ptr = GET_PTR(heap, elem);
183 // if was the first, set a new first
184 if (*first_free == elem) {
185 *first_free = elem_ptr->fnext;
186 // if the current first was not alone
187 if (*first_free) {
188 GET_PTR(heap, *first_free)->fprev = 0;
189 }
190 return;
191 }
192 // link previous to following, if existing previous
193 if (elem_ptr->fprev) {
194 GET_PREV(heap, elem_ptr)->fnext = elem_ptr->fnext;
195 }
196 // link following to previous, if existing following
197 if (elem_ptr->fnext) {
198 GET_NEXT(heap, elem_ptr)->fprev = elem_ptr->fprev;
199 }
200}
201
202// add item in LIFO
203static inline void list_push(heap_t *heap, header_t *header)
204{
205 int seg_index = seglist_index(heap, header->size);
206 uint16_t first_idx = heap->free_segments[seg_index];
207 if (seg_index < 0) {
208 return;
209 }
210 // if it's already the first item, nothing to do
211 if (first_idx == GET_IDX(heap, header)) {
212 return;
213 }
214 // link this new first to following (previous first)
215 header->fnext = first_idx;
216 header->fprev = 0;
217 header->allocated = 0;
218 // link following (previous first) to new first
219 if (first_idx != 0) {
220 header_t *first = GET_PTR(heap, first_idx);
221 first->fprev = GET_IDX(heap, header);
222 }
223 // replace new first
224 heap->free_segments[seg_index] = GET_IDX(heap, header);
225}
226
// Ensures the chunk header is consistent; on corruption, logs and THROWs
// EXCEPTION_CORRUPT (does not return). Checks the size field and the
// physical-predecessor link.
static inline void ensure_chunk_valid(heap_t *heap, header_t *header)
{
    uint8_t *block = (uint8_t *) header;
    // ensure size is valid: pointer-aligned (multiple of PAYLOAD_ALIGNEMENT),
    // at least the size of a free-chunk header, and not running past the end
    // of the heap buffer (short-circuit order matters: the bound check only
    // runs on a sane size)
    if ((header->size & (PAYLOAD_ALIGNEMENT - 1)) || (header->size < FREE_CHUNK_HEADER_SIZE)
        || ((block + header->size) > (uint8_t *) heap->end)) {
        PRINTF("invalid size: header->size = %d!\n", header->size);
        THROW(EXCEPTION_CORRUPT);
    }
    // ensure phys_prev is valid (0 means "no physical predecessor")
    if (header->phys_prev != 0) {
        header_t *prev = GET_PTR(heap, header->phys_prev);
        // 'block' is reused as the heap base address from here on
        block = (uint8_t *) heap;
        // the predecessor must lie inside the heap buffer, past the heap header
        if (((uint8_t *) prev < &block[HEAP_HEADER_SIZE]) || ((void *) prev > heap->end)) {
            PRINTF("corrupted prev_chunk\n");
            THROW(EXCEPTION_CORRUPT);
        }
        // ensure that next of prev is current (physical adjacency invariant:
        // prev + prev->size must land exactly on this chunk)
        if ((((uint8_t *) prev) + prev->size) != (uint8_t *) header) {
            PRINTF("corrupted prev_chunk\n");
            THROW(EXCEPTION_CORRUPT);
        }
    }
}
252
// Coalesces the two given physically adjacent chunks: if `neighbour` is free,
// it is removed from its free list and merged with `header`. Returns the
// header of the (possibly merged) chunk, which is the lower one in memory.
// May THROW(EXCEPTION_CORRUPT) if the neighbour chunk is corrupted.
static header_t *coalesce(heap_t *heap, header_t *header, header_t *neighbour)
{
    uint16_t neighbour_idx = GET_IDX(heap, neighbour);
    ensure_chunk_valid(heap, neighbour);

    // if not allocated, coalesce
    if (!neighbour->allocated) {
        // remove this neighbour from its free list
        // NOTE(review): assumes seglist_index() cannot return -1 for an
        // in-heap free chunk (otherwise free_segments[-1] is addressed) — verify
        list_remove(
            heap, &heap->free_segments[seglist_index(heap, neighbour->size)], neighbour_idx);
        // link the current next physical chunk (if existing) to this new chunk
        header_t *next;
        if (header < neighbour) {
            // header precedes neighbour: header absorbs neighbour
            next = (header_t *) (((uint8_t *) neighbour) + neighbour->size);
            if ((void *) next < heap->end) {
                next->phys_prev = GET_IDX(heap, header);
            }
            header->size += neighbour->size;
            // clean-up neighbour header to avoid leaking stale metadata
            memset(neighbour, 0, sizeof(*neighbour));
        }
        else {
            // neighbour precedes header: neighbour absorbs header
            next = (header_t *) (((uint8_t *) header) + header->size);
            // ensure next is valid (inside the heap buffer) before linking it
            if ((void *) next < heap->end) {
                next->phys_prev = neighbour_idx;
            }
            neighbour->size += header->size;
            // clean-up header to avoid leaking
            memset(header, 0, sizeof(*header));
            // header points now to neighbour
            header = neighbour;
        }
    }
    return header;
}
290
291static bool parse_callback(void *data, uint8_t *addr, bool allocated, size_t size)
292{
293 mem_stat_t *stat = (mem_stat_t *) data;
294
295 UNUSED(addr);
296 // error reached
297 if (size == 0) {
298 return true;
299 }
300 stat->nb_chunks++;
301 if (allocated) {
302 stat->nb_allocated++;
303 stat->allocated_size += size;
304 }
305 else {
306 stat->free_size += size;
307 }
308 stat->total_size += size;
309
310 return false;
311}
312
313/**********************
314 * GLOBAL FUNCTIONS
315 **********************/
316
/**
 * @brief initializes the heap for this allocator
 *
 * @param heap_start base address of the buffer to manage (must not be NULL)
 * @param heap_size size in bytes of the buffer: a multiple of 8, at least 200,
 *                  and at most 0x7FF8 + HEAP_HEADER_SIZE (so the initial free
 *                  chunk fits the 15-bit size field)
 * @return an opaque allocator context (the heap base), or NULL on invalid parameters
 */
mem_ctx_t mem_init(void *heap_start, size_t heap_size)
{
    heap_t *heap = (heap_t *) heap_start;

    // heap_start must not be NULL
    if (heap_start == NULL) {
        return NULL;
    }

    // size must be a multiple of 8, at least 200 bytes, and small enough for
    // the single initial free chunk to be representable
    if ((heap_size & (FREE_CHUNK_HEADER_SIZE - 1)) || (heap_size < 200)
        || (heap_size > (0x7FF8 + HEAP_HEADER_SIZE))) {
        return NULL;
    }

    heap->end = ((uint8_t *) heap_start) + heap_size;

    // compute number of segments: log2 of the usable size, minus the
    // NB_LINEAR_SEGMENTS aggregated into segment 0, plus segment 0 itself
    heap->nb_segs = 31 - __builtin_clz(heap_size - HEAP_HEADER_SIZE) - NB_LINEAR_SEGMENTS + 1;
    if (heap->nb_segs > NB_MAX_SEGMENTS) {
        return NULL;
    }
    memset(heap->free_segments, 0, heap->nb_segs * NB_SUB_SEGMENTS * sizeof(uint16_t));

    // initiate free chunk LIFO with the whole heap (minus its header) as a free chunk
    header_t *first_free = (header_t *) (((uint8_t *) heap) + HEAP_HEADER_SIZE);
    first_free->size = heap_size - HEAP_HEADER_SIZE;
    first_free->phys_prev = 0;
    list_push(heap, first_free);

    return (mem_ctx_t) heap_start;
}
357
367void *mem_alloc(mem_ctx_t ctx, size_t nb_bytes)
368{
369 heap_t *heap = (heap_t *) ctx;
370 int seg;
371
372 // nb_bytes must be > 0
373 if (nb_bytes == 0) {
374 return NULL;
375 }
376
377 // Reject sizes that would overflow the unsigned arithmetic below
378 // or exceed the maximum chunk size storable in the header.
379 if (nb_bytes > MAX_BLOCK_SIZE
381 return NULL;
382 }
383
384 // Adjust size to include header and satisfy alignment requirements, etc.
385 nb_bytes = align_alloc_size(nb_bytes);
386
387 // get the segment sure to be holding this size
388 // if nb_bytes == 2^n , all chunks in [2^n: 2^(n+1)[ are ok
389 // if nb_bytes > 2^n , all chunks in [2^(n+1): 2^(n+2)[ are ok
390 seg = seglist_index_up(heap, nb_bytes);
391 if (seg < 0) {
392 return NULL;
393 }
394 // Loop through all increasing size segments
395 while ((size_t) seg < (heap->nb_segs * NB_SUB_SEGMENTS)) {
396 uint16_t chunk_idx = heap->free_segments[seg];
397 // if this segment is not empty, it must contain a big-enough chunk
398 if (chunk_idx != 0) {
399 header_t *header = GET_PTR(heap, chunk_idx);
400
401 // ensure chunk is consistent
402 ensure_chunk_valid(heap, header);
403 // this block shall always be big enough
404 if (nb_bytes > header->size) {
405 // probably a big issue, maybe necessary to throw an exception
406 return NULL;
407 }
408 uint8_t *block = (uint8_t *) header;
409
410 // remove the block from the segregated list
411 list_remove(heap, &heap->free_segments[seg], chunk_idx);
412 // We could turn the excess bytes into a new free block
413 // the minimum size is the size of an empty chunk
414 if (header->size >= (nb_bytes + FREE_CHUNK_HEADER_SIZE)) {
415 header_t *new_free = (header_t *) &block[nb_bytes];
416 new_free->size = header->size - nb_bytes;
417 // link this new chunk to previous (found) one
418 new_free->phys_prev = chunk_idx;
419 // link the current next physical chunk to this new chunk (if existing)
420 header_t *next = (header_t *) &block[header->size];
421 if ((void *) next < heap->end) {
422 next->phys_prev = GET_IDX(heap, new_free);
423 }
424 list_push(heap, new_free);
425 // change size only if new chunk is created
426 header->size = nb_bytes;
427 }
428
429 // Update the chunk's header to set the allocated bit
430 header->allocated = 0x1;
431
432 // clean-up previous empty header info
433 header->fnext = header->fprev = 0;
434
435 // Return a pointer to the payload
436 return block + ALLOC_CHUNK_HEADER_SIZE;
437 }
438 // not found in current segment, let's try in bigger segment
439 seg++;
440 }
441 return NULL;
442}
443
456void *mem_realloc(mem_ctx_t ctx, void *ptr, size_t size)
457{
458 heap_t *heap = (heap_t *) ctx;
459
460 // Use mem_alloc if original pointer is NULL
461 if (ptr == NULL) {
462 return mem_alloc(ctx, size);
463 }
464
465 // Check ptr is valid
466 if (ptr < (void *) (((uint8_t *) heap) + HEAP_HEADER_SIZE) || ptr >= (void *) heap->end) {
467 PRINTF("invalid pointer passed to realloc: 0x%p!\n", ptr);
468 return NULL;
469 }
470
471 // Free the original block if new size is zero
472 if (size == 0) {
473 mem_free(ctx, ptr);
474 return NULL;
475 }
476
477 // Reject sizes that would overflow the unsigned arithmetic in
478 // align_alloc_size (which adds up to PAYLOAD_ALIGNEMENT - 1 +
479 // ALLOC_CHUNK_HEADER_SIZE + PAYLOAD_ALIGNEMENT bytes of overhead).
480 if (size > MAX_BLOCK_SIZE
482 return NULL;
483 }
484
485 // Get the header of the current block
486 uint8_t *block = ((uint8_t *) ptr) - ALLOC_CHUNK_HEADER_SIZE;
487 header_t *header = (header_t *) block;
488
489 // Ensure this is a valid chunk before we try to read its size.
490 ensure_chunk_valid(heap, header);
491
492 // Save the size of the old block
493 size_t old_block_size = header->size;
494 // Save the size of the data portion of the old block
495 size_t old_payload_size = old_block_size - ALLOC_CHUNK_HEADER_SIZE;
496 // Adjust size of the new block to include header and satisfy alignment requirements, etc.
497 size_t new_block_size = align_alloc_size(size);
498
499 // If the old block is already big enough, return the same pointer
500 if (old_payload_size >= size) {
501 // Shrink optimization : split the original block in two
502 // if remaining size is enough for a free chunk
503 size_t remainder = old_block_size - new_block_size;
504 if (remainder >= FREE_CHUNK_HEADER_SIZE) {
505 // Save pointer to next physical chunk
506 header_t *next = (header_t *) &block[old_block_size];
507 // Create new free chunk in the remaining space,
508 // located just after the resized block
509 header_t *new_free = (header_t *) &block[new_block_size];
510 new_free->size = remainder;
511 // The new chunk predecessor is the resized chunk (still the same)
512 new_free->phys_prev = GET_IDX(heap, header);
513 // Link the current next physical chunk to this new chunk (if existing)
514 if ((void *) next < heap->end) {
515 next->phys_prev = GET_IDX(heap, new_free);
516 }
517 list_push(heap, new_free);
518 // Adjust the size of the allocated block
519 header->size = new_block_size;
520 }
521 // Return the original block pointer as it is still valid
522 return ptr;
523 }
524
525 // Allocate new block
526 void *new_ptr = mem_alloc(ctx, size);
527 if (new_ptr == NULL) {
528 return NULL;
529 }
530
531 // We copy only the amount of data that existed in the OLD block.
532 // Since we know old_payload_size < size, we copy old_payload_size.
533 // This guarantees we never read past the end of the old allocated buffer.
534 memcpy(new_ptr, ptr, old_payload_size);
535
536 // Free the old block
537 mem_free(ctx, ptr);
538
539 // Return the new block pointer
540 return new_ptr;
541}
542
549void mem_free(mem_ctx_t ctx, void *ptr)
550{
551 heap_t *heap = (heap_t *) ctx;
552 uint8_t *block = ((uint8_t *) ptr) - ALLOC_CHUNK_HEADER_SIZE;
553 header_t *header = (header_t *) block;
554
555 // ensure size is consistent
556 ensure_chunk_valid(heap, header);
557 // if not allocated, return
558 if (!header->allocated) {
559 return;
560 }
561 // try to coalesce with adjacent physical chunks (after and before)
562 // try with next physical chunk
563 if ((block + header->size) < (uint8_t *) heap->end) {
564 header = coalesce(heap, header, (header_t *) (block + header->size));
565 }
566 // try previous chunk
567 if (header->phys_prev != 0) {
568 header = coalesce(heap, header, GET_PTR(heap, header->phys_prev));
569 }
570
571 // free chunk and add it on top of free chunk LIFO
572 list_push(heap, header);
573}
574
582void mem_parse(mem_ctx_t ctx, mem_parse_callback_t callback, void *data)
583{
584 heap_t *heap = (heap_t *) ctx;
585 header_t *header = (header_t *) (((uint8_t *) heap) + HEAP_HEADER_SIZE);
586#ifdef DEBUG_FREE_SEGMENTS
587 for (size_t i = 0; i < heap->nb_segs; i++) {
588 if (heap->free_segments[i] != NULL) {
589 header_t *head = heap->free_segments[i];
590 int nb_elemts = 0;
591 while (head) {
592 nb_elemts++;
593 head = head->fnext;
594 }
595 }
596 }
597#endif // DEBUG_FREE_SEGMENTS
598 while (header != NULL) {
599 uint8_t *block = (uint8_t *) header;
600 // break the loop if the callback returns true
601 if (callback(data, block, header->allocated, header->size)) {
602 return;
603 }
604 if (&block[header->size] < (uint8_t *) heap->end) {
605 header = (header_t *) &block[header->size];
606 }
607 else {
608 return;
609 }
610 }
611}
612
621{
622 memset(stat, 0, sizeof(mem_stat_t));
624 mem_parse(ctx, parse_callback, stat);
625}
#define GET_NEXT(_heap, _ptr)
Definition mem_alloc.c:58
#define GET_PTR(_heap, index)
Definition mem_alloc.c:55
#define ALLOC_CHUNK_HEADER_SIZE
Definition mem_alloc.c:27
void * mem_realloc(mem_ctx_t ctx, void *ptr, size_t size)
Reallocates a buffer to a new size.
Definition mem_alloc.c:456
struct header_s header_t
#define PAYLOAD_ALIGNEMENT
Definition mem_alloc.c:23
#define GET_SEGMENT(_size)
Definition mem_alloc.c:60
mem_ctx_t mem_init(void *heap_start, size_t heap_size)
initializes the heap for this allocator
Definition mem_alloc.c:325
#define NB_LINEAR_SEGMENTS
Definition mem_alloc.c:37
#define NB_SUB_SEGMENTS
Definition mem_alloc.c:52
void * mem_alloc(mem_ctx_t ctx, size_t nb_bytes)
allocates a buffer of the given size, if possible
Definition mem_alloc.c:367
#define NB_MAX_SEGMENTS
Definition mem_alloc.c:50
#define HEAP_HEADER_SIZE
Definition mem_alloc.c:33
#define MAX_BLOCK_SIZE
Definition mem_alloc.c:31
#define FREE_CHUNK_HEADER_SIZE
Definition mem_alloc.c:29
void mem_free(mem_ctx_t ctx, void *ptr)
frees the given buffer
Definition mem_alloc.c:549
void mem_stat(mem_ctx_t *ctx, mem_stat_t *stat)
function used to get statistics (nb chunks, nb allocated chunks, total size) about the heap
Definition mem_alloc.c:620
void mem_parse(mem_ctx_t ctx, mem_parse_callback_t callback, void *data)
parse the heap
Definition mem_alloc.c:582
#define GET_PREV(_heap, _ptr)
Definition mem_alloc.c:57
#define GET_IDX(_heap, _ptr)
Definition mem_alloc.c:56
Dynamic memory allocation API.
void * mem_ctx_t
type shared externally
Definition mem_alloc.h:27
bool(* mem_parse_callback_t)(void *data, uint8_t *addr, bool allocated, size_t size)
function called for each chunk found in the heap.
Definition mem_alloc.h:40
uint16_t fprev
previous free chunk in the same segment
Definition mem_alloc.c:71
uint16_t size
Definition mem_alloc.c:66
uint16_t phys_prev
physical previous chunk
Definition mem_alloc.c:69
uint16_t fnext
next free chunk in the same segment
Definition mem_alloc.c:72
uint16_t allocated
Definition mem_alloc.c:67
void * end
end of headp buffer, for consistency check
Definition mem_alloc.c:76
size_t nb_segs
actual number of used segments
Definition mem_alloc.c:77
uint16_t free_segments[NB_MAX_SEGMENTS *NB_SUB_SEGMENTS]
Definition mem_alloc.c:78
this structure, filled by mem_stat
Definition mem_alloc.h:46
size_t allocated_size
nb bytes allocated in the heap (including headers)
Definition mem_alloc.h:50
size_t total_size
total size of the heap (including allocator internal data)
Definition mem_alloc.h:47
uint32_t nb_allocated
number of allocated chunks
Definition mem_alloc.h:52
size_t free_size
Definition mem_alloc.h:48
uint32_t nb_chunks
total number of chunks
Definition mem_alloc.h:51