Embedded SDK
Embedded SDK
Loading...
Searching...
No Matches
mem_alloc.c
Go to the documentation of this file.
1
6/*********************
7 * INCLUDES
8 *********************/
9#include <stdio.h>
10#include <string.h>
11#include "exceptions.h"
12#include "errors.h"
13#include "os_helpers.h"
14#include "os_math.h"
15#include "os_print.h"
16#include "os_utils.h"
17#include "mem_alloc.h"
18
19/*********************
20 * DEFINES
21 *********************/
22// Alignment for payload data size
23#define PAYLOAD_ALIGNEMENT 4
24// Alignment for returned payload
25#define PAYLOAD_DATA_ALIGNEMENT 8
// size of the header of an allocated chunk
27#define ALLOC_CHUNK_HEADER_SIZE 4
28// sizeof of header for a free chunk
29#define FREE_CHUNK_HEADER_SIZE 8
30// size of heap header
31#define HEAP_HEADER_SIZE \
32 ((sizeof(heap_t) + PAYLOAD_DATA_ALIGNEMENT - 1) & ~(PAYLOAD_DATA_ALIGNEMENT - 1))
33
34// number of 2^N segments aggregated as linear
35#define NB_LINEAR_SEGMENTS 5
36// 10 segments (the first one is linear) enable using up to ~32k bytes chunks, which is more than
37// enough
/* Segregated free list bucket sizes (bucket n covers 2^(n+5) to 2^(n+6)-1 bytes,
   except bucket 0 which aggregates everything below 64 bytes):
- 0: 1-63 bytes
- 1: 64-127 bytes
- 2: 128-255 bytes
- 3: 256-511 bytes
- 4: 512-1023 bytes
- 5: 1024-2047 bytes
- 6: 2048-4095 bytes
- 7: 4096-8191 bytes
- 8: 8192-16383 bytes
- 9: 16384-32767 bytes
*/
48#define NB_MAX_SEGMENTS 10
49// 4 sub-segments to have a better segregation of each segments
50#define NB_SUB_SEGMENTS 4
51
52// index is basically the offset in u64 from beginning of full heap buffer
53#define GET_PTR(_heap, index) ((header_t *) (((uint8_t *) _heap) + ((index) << 3)))
54#define GET_IDX(_heap, _ptr) ((((uint8_t *) (_ptr)) - ((uint8_t *) _heap)) >> 3)
55#define GET_PREV(_heap, _ptr) ((header_t *) (((uint8_t *) _heap) + ((_ptr)->fprev << 3)))
56#define GET_NEXT(_heap, _ptr) ((header_t *) (((uint8_t *) _heap) + ((_ptr)->fnext << 3)))
57
// Returns the segment for a size: floor(log2(_size)), clamped up to NB_LINEAR_SEGMENTS
// so that all sizes below 2^NB_LINEAR_SEGMENTS share the first (linear) segment.
// NOTE: the macro parameter must be used, not a caller variable name; _size must have
// no side effects (MAX evaluates its arguments twice).
#define GET_SEGMENT(_size) MAX(NB_LINEAR_SEGMENTS, (31 - __builtin_clz(_size)))
59
60/**********************
61 * TYPEDEFS
62 **********************/
// Per-chunk header. Free chunks use the full 8 bytes (including fprev/fnext);
// allocated chunks only use the first 4 bytes, the payload starts right after.
typedef struct header_s {
    uint16_t size : 15;      // chunk size in bytes; the biggest size is 0x7FFF = 32767
    uint16_t allocated : 1;  // 1 if the chunk is allocated, 0 if free

    uint16_t phys_prev;  // index (in 8-byte units) of the physically previous chunk, 0 if first

    uint16_t fprev;  // previous free chunk in the same segment (free chunks only)
    uint16_t fnext;  // next free chunk in the same segment (free chunks only)
} header_t;
// Heap bookkeeping structure, stored at the very beginning of the heap buffer.
typedef struct {
    void *end;       // end of heap buffer, for consistency check
    size_t nb_segs;  // actual number of used segments
    // heads (chunk indices, 0 = empty) of the segregated explicit free lists
    uint16_t free_segments[NB_MAX_SEGMENTS * NB_SUB_SEGMENTS];
} heap_t;
78
79/**********************
80 * STATIC VARIABLES
81 **********************/
82
83/**********************
84 * STATIC PROTOTYPES
85 **********************/
86
87/**********************
88 * LOCAL FUNCTIONS
89 **********************/
90static inline size_t align_alloc_size(size_t size)
91{
92 // Add alignment - 1 to prepare for rounding up if needed
93 // This trick ensures already aligned sizes are not changed
94 size += PAYLOAD_ALIGNEMENT - 1;
95 // Mask off lower bits to align size down to nearest alignment boundary
96 size &= ~(PAYLOAD_ALIGNEMENT - 1);
97 // Either add 4 bytes (if size is 8N+4) or 8 bytes (if size is 8N) so the full
98 // 8-byte header structure fits while preserving alignment.
101 return size;
102}
103
// This function returns the index into the array of segregated explicit free lists
// corresponding to the given size, i.e. segment * NB_SUB_SEGMENTS + sub_segment,
// or -1 if the size is too big for this heap.
static inline int seglist_index(heap_t *heap, size_t size)
{
    size_t seg;
    uint8_t sub_segment;

    // seg = max(NB_LINEAR_SEGMENTS, floor(log2(size)))
    seg = GET_SEGMENT(size);
    if (seg == NB_LINEAR_SEGMENTS) {
        // for the aggregated segment, the sub-segment size is 1/NB_SUB_SEGMENTS of the size of the
        // segment
        sub_segment = (size >> (seg - 1)) & 0x3;
    }
    else {
        // sub segment size is 1/(NB_SUB_SEGMENTS*2) of the size of the segment
        sub_segment = (size >> (seg - 2)) & 0x3;
    }
    // from size in [0 : 63[, segment is 0 to 5 but is forced to 0
    // from size in [2^6 : 2^13[, segment is [6 : 12] but is forced to [1 : 7]
    seg -= NB_LINEAR_SEGMENTS;
    // check consistency: size too big for the number of segments of this heap
    if (seg >= heap->nb_segs) {
        return -1;
    }
    return seg * NB_SUB_SEGMENTS + sub_segment;
}
131
// This function returns the index into the array of segregated explicit free lists
// corresponding to the given size, being sure to be big enough (so rounded to next sub-segment).
// Used on allocation so that any chunk found in the returned (or a later) list fits.
// Returns -1 if the (rounded) size is too big for this heap.
static inline int seglist_index_up(heap_t *heap, size_t size)
{
    size_t seg;
    uint8_t sub_segment;
    size_t sub_segment_size;

    seg = GET_SEGMENT(size);
    if (seg == NB_LINEAR_SEGMENTS) {
        // for the aggregated segment, the sub-segment size is 1/NB_SUB_SEGMENTS of the size of the
        // segment
        sub_segment_size = (1 << (seg - 1));
    }
    else {
        // sub segment size is 1/(NB_SUB_SEGMENTS*2) of the size of the segment
        sub_segment_size = (1 << (seg - 2));
    }

    // round size to next sub-segment
    size += sub_segment_size - 1;
    size &= ~(sub_segment_size - 1);

    // recompute segment/sub-segment: rounding may have pushed size into the next segment
    seg = GET_SEGMENT(size);
    if (seg == NB_LINEAR_SEGMENTS) {
        // for the aggregated segment, the sub-segment size is 1/NB_SUB_SEGMENTS of the size of the
        // segment
        sub_segment = (size >> (seg - 1)) & 0x3;
    }
    else {
        // sub segment size is 1/(NB_SUB_SEGMENTS*2) of the size of the segment
        sub_segment = (size >> (seg - 2)) & 0x3;
    }

    // from size in [0 : 63[, segment is 0 to 5 but is forced to 0
    // from size in [2^6 : 2^13[, segment is [6 : 12] but is forced to [1 : 7]
    seg -= NB_LINEAR_SEGMENTS;

    // check consistency: size too big for the number of segments of this heap
    if (seg >= heap->nb_segs) {
        return -1;
    }
    return seg * NB_SUB_SEGMENTS + sub_segment;
}
176
177// remove item from linked list
178static inline void list_remove(heap_t *heap, uint16_t *first_free, uint16_t elem)
179{
180 header_t *elem_ptr = GET_PTR(heap, elem);
181 // if was the first, set a new first
182 if (*first_free == elem) {
183 *first_free = elem_ptr->fnext;
184 // if the current first was not alone
185 if (*first_free) {
186 GET_PTR(heap, *first_free)->fprev = 0;
187 }
188 return;
189 }
190 // link previous to following, if existing previous
191 if (elem_ptr->fprev) {
192 GET_PREV(heap, elem_ptr)->fnext = elem_ptr->fnext;
193 }
194 // link following to previous, if existing following
195 if (elem_ptr->fnext) {
196 GET_NEXT(heap, elem_ptr)->fprev = elem_ptr->fprev;
197 }
198}
199
200// add item in LIFO
201static inline void list_push(heap_t *heap, header_t *header)
202{
203 int seg_index = seglist_index(heap, header->size);
204 uint16_t first_idx = heap->free_segments[seg_index];
205 if (seg_index < 0) {
206 return;
207 }
208 // if it's already the first item, nothing to do
209 if (first_idx == GET_IDX(heap, header)) {
210 return;
211 }
212 // link this new first to following (previous first)
213 header->fnext = first_idx;
214 header->fprev = 0;
215 header->allocated = 0;
216 // link following (previous first) to new first
217 if (first_idx != 0) {
218 header_t *first = GET_PTR(heap, first_idx);
219 first->fprev = GET_IDX(heap, header);
220 }
221 // replace new first
222 heap->free_segments[seg_index] = GET_IDX(heap, header);
223}
224
// ensure the chunk header is consistent; THROWs EXCEPTION_CORRUPT otherwise
static inline void ensure_chunk_valid(heap_t *heap, header_t *header)
{
    uint8_t *block = (uint8_t *) header;
    // size must be a multiple of PAYLOAD_ALIGNEMENT, at least the size of a free
    // chunk header, and the chunk must not run past the end of the heap buffer
    if ((header->size & (PAYLOAD_ALIGNEMENT - 1)) || (header->size < FREE_CHUNK_HEADER_SIZE)
        || ((block + header->size) > (uint8_t *) heap->end)) {
        PRINTF("invalid size: header->size = %d!\n", header->size);
        THROW(EXCEPTION_CORRUPT);
    }
    // ensure phys_prev is valid (0 means "no physical predecessor")
    if (header->phys_prev != 0) {
        header_t *prev = GET_PTR(heap, header->phys_prev);
        block = (uint8_t *) heap;
        // the previous chunk must lie inside the heap buffer, after the heap header
        if (((uint8_t *) prev < &block[HEAP_HEADER_SIZE]) || ((void *) prev > heap->end)) {
            PRINTF("corrupted prev_chunk\n");
            THROW(EXCEPTION_CORRUPT);
        }
        // ensure that next of prev is current (physical chunks must be contiguous)
        if ((((uint8_t *) prev) + prev->size) != (uint8_t *) header) {
            PRINTF("corrupted prev_chunk\n");
            THROW(EXCEPTION_CORRUPT);
        }
    }
}
250
// coalesce the two given physically adjacent chunks, if the neighbour is free;
// returns the header of the resulting (possibly merged) chunk.
// 'neighbour' may be either the previous or the next physical chunk of 'header'.
static header_t *coalesce(heap_t *heap, header_t *header, header_t *neighbour)
{
    uint16_t neighbour_idx = GET_IDX(heap, neighbour);
    ensure_chunk_valid(heap, neighbour);

    // if not allocated, coalesce
    if (!neighbour->allocated) {
        // remove this neighbour from its free list
        list_remove(
            heap, &heap->free_segments[seglist_index(heap, neighbour->size)], neighbour_idx);
        // link the current next physical chunk (if existing) to this new chunk
        header_t *next;
        if (header < neighbour) {
            // neighbour follows header physically: grow header over neighbour
            next = (header_t *) (((uint8_t *) neighbour) + neighbour->size);
            if ((void *) next < heap->end) {
                next->phys_prev = GET_IDX(heap, header);
            }
            header->size += neighbour->size;
            // clean-up neighbour to avoid leaking
            memset(neighbour, 0, sizeof(*neighbour));
        }
        else {
            // neighbour precedes header physically: grow neighbour over header
            next = (header_t *) (((uint8_t *) header) + header->size);
            // ensure next is valid (inside the heap buffer) before linking it
            if ((void *) next < heap->end) {
                next->phys_prev = neighbour_idx;
            }
            neighbour->size += header->size;
            // clean-up header to avoid leaking
            memset(header, 0, sizeof(*header));
            // header points now to neighbour
            header = neighbour;
        }
    }
    return header;
}
288
289static bool parse_callback(void *data, uint8_t *addr, bool allocated, size_t size)
290{
291 mem_stat_t *stat = (mem_stat_t *) data;
292
293 UNUSED(addr);
294 // error reached
295 if (size == 0) {
296 return true;
297 }
298 stat->nb_chunks++;
299 if (allocated) {
300 stat->nb_allocated++;
301 stat->allocated_size += size;
302 }
303 else {
304 stat->free_size += size;
305 }
306 stat->total_size += size;
307
308 return false;
309}
310
311/**********************
312 * GLOBAL FUNCTIONS
313 **********************/
314
/**
 * @brief initializes the heap for this allocator
 *
 * @param heap_start base address of the buffer to manage (must not be NULL)
 * @param heap_size size in bytes of the buffer: multiple of 8, at least 200,
 *                  at most 0x7FF8 + HEAP_HEADER_SIZE (chunk sizes are 15-bit)
 * @return an opaque context for the other mem_* functions, or NULL on invalid parameters
 */
mem_ctx_t mem_init(void *heap_start, size_t heap_size)
{
    heap_t *heap = (heap_t *) heap_start;

    // heap_start must not be NULL
    if (heap_start == NULL) {
        return NULL;
    }

    // size must be a multiple of 8, at least 200 bytes, and small enough for
    // chunk sizes to fit in the 15-bit size field
    if ((heap_size & (FREE_CHUNK_HEADER_SIZE - 1)) || (heap_size < 200)
        || (heap_size > (0x7FF8 + HEAP_HEADER_SIZE))) {
        return NULL;
    }

    heap->end = ((uint8_t *) heap_start) + heap_size;

    // compute number of segments: log2 of the usable size, the first
    // NB_LINEAR_SEGMENTS power-of-two ranges being aggregated into one segment
    heap->nb_segs = 31 - __builtin_clz(heap_size - HEAP_HEADER_SIZE) - NB_LINEAR_SEGMENTS + 1;
    if (heap->nb_segs > NB_MAX_SEGMENTS) {
        return NULL;
    }
    memset(heap->free_segments, 0, heap->nb_segs * NB_SUB_SEGMENTS * sizeof(uint16_t));

    // initiate free chunk LIFO with the whole heap as a free chunk
    header_t *first_free = (header_t *) (((uint8_t *) heap) + HEAP_HEADER_SIZE);
    first_free->size = heap_size - HEAP_HEADER_SIZE;
    first_free->phys_prev = 0;
    list_push(heap, first_free);

    return (mem_ctx_t) heap_start;
}
355
/**
 * @brief allocates a buffer of the given size, if possible
 *
 * Searches the segregated free lists, starting from the smallest one guaranteed
 * to hold a big-enough chunk, and splits the found chunk if the excess is large
 * enough to form a new free chunk.
 *
 * @param ctx the heap context, as returned by mem_init()
 * @param nb_bytes requested payload size (must be > 0)
 * @return pointer to the payload (4 bytes after the chunk header), or NULL on failure
 */
void *mem_alloc(mem_ctx_t ctx, size_t nb_bytes)
{
    heap_t *heap = (heap_t *) ctx;
    int seg;

    // nb_bytes must be > 0
    if (nb_bytes == 0) {
        return NULL;
    }

    // Adjust size to include header and satisfy alignment requirements, etc.
    nb_bytes = align_alloc_size(nb_bytes);

    // get the segment sure to be holding this size
    // if nb_bytes == 2^n , all chunks in [2^n: 2^(n+1)[ are ok
    // if nb_bytes > 2^n , all chunks in [2^(n+1): 2^(n+2)[ are ok
    seg = seglist_index_up(heap, nb_bytes);
    if (seg < 0) {
        return NULL;
    }
    // Loop through all increasing size segments
    while ((size_t) seg < (heap->nb_segs * NB_SUB_SEGMENTS)) {
        uint16_t chunk_idx = heap->free_segments[seg];
        // if this segment is not empty, it must contain a big-enough chunk
        if (chunk_idx != 0) {
            header_t *header = GET_PTR(heap, chunk_idx);

            // ensure chunk is consistent (throws EXCEPTION_CORRUPT otherwise)
            ensure_chunk_valid(heap, header);
            // this block shall always be big enough
            if (nb_bytes > header->size) {
                // probably a big issue, maybe necessary to throw an exception
                return NULL;
            }
            uint8_t *block = (uint8_t *) header;

            // remove the block from the segregated list
            list_remove(heap, &heap->free_segments[seg], chunk_idx);
            // We could turn the excess bytes into a new free block
            // the minimum size is the size of an empty chunk
            if (header->size >= (nb_bytes + FREE_CHUNK_HEADER_SIZE)) {
                header_t *new_free = (header_t *) &block[nb_bytes];
                new_free->size = header->size - nb_bytes;
                // link this new chunk to previous (found) one
                new_free->phys_prev = chunk_idx;
                // link the current next physical chunk to this new chunk (if existing)
                header_t *next = (header_t *) &block[header->size];
                if ((void *) next < heap->end) {
                    next->phys_prev = GET_IDX(heap, new_free);
                }
                list_push(heap, new_free);
                // change size only if new chunk is created
                header->size = nb_bytes;
            }

            // Update the chunk's header to set the allocated bit
            header->allocated = 0x1;

            // clean-up previous empty header info
            header->fnext = header->fprev = 0;

            // Return a pointer to the payload
            return block + ALLOC_CHUNK_HEADER_SIZE;
        }
        // not found in current segment, let's try in bigger segment
        seg++;
    }
    // no big-enough free chunk in any segment
    return NULL;
}
434
/**
 * @brief Reallocates a buffer to a new size
 *
 * Follows the usual realloc() contract: NULL ptr behaves like mem_alloc(),
 * size 0 behaves like mem_free(). Shrinks in place (splitting off a free
 * chunk when possible); grows by allocate + copy + free.
 *
 * @param ctx the heap context, as returned by mem_init()
 * @param ptr pointer previously returned by mem_alloc(), or NULL
 * @param size new payload size, or 0 to free
 * @return pointer to the (possibly moved) payload, or NULL on failure/free
 */
void *mem_realloc(mem_ctx_t ctx, void *ptr, size_t size)
{
    heap_t *heap = (heap_t *) ctx;

    // Use mem_alloc if original pointer is NULL
    if (ptr == NULL) {
        return mem_alloc(ctx, size);
    }

    // Check ptr is valid (inside the heap payload area)
    if (ptr < (void *) (((uint8_t *) heap) + HEAP_HEADER_SIZE) || ptr >= (void *) heap->end) {
        PRINTF("invalid pointer passed to realloc: 0x%p!\n", ptr);
        return NULL;
    }

    // Free the original block if new size is zero
    if (size == 0) {
        mem_free(ctx, ptr);
        return NULL;
    }

    // Get the header of the current block (located just before the payload)
    uint8_t *block = ((uint8_t *) ptr) - ALLOC_CHUNK_HEADER_SIZE;
    header_t *header = (header_t *) block;

    // Ensure this is a valid chunk before we try to read its size.
    ensure_chunk_valid(heap, header);

    // Save the size of the old block
    size_t old_block_size = header->size;
    // Save the size of the data portion of the old block
    size_t old_payload_size = old_block_size - ALLOC_CHUNK_HEADER_SIZE;
    // Adjust size of the new block to include header and satisfy alignment requirements, etc.
    size_t new_block_size = align_alloc_size(size);

    // If the old block is already big enough, return the same pointer
    if (old_payload_size >= size) {
        // Shrink optimization : split the original block in two
        // if remaining size is enough for a free chunk
        size_t remainder = old_block_size - new_block_size;
        if (remainder >= FREE_CHUNK_HEADER_SIZE) {
            // Save pointer to next physical chunk
            header_t *next = (header_t *) &block[old_block_size];
            // Create new free chunk in the remaining space,
            // located just after the resized block
            header_t *new_free = (header_t *) &block[new_block_size];
            new_free->size = remainder;
            // The new chunk predecessor is the resized chunk (still the same)
            new_free->phys_prev = GET_IDX(heap, header);
            // Link the current next physical chunk to this new chunk (if existing)
            if ((void *) next < heap->end) {
                next->phys_prev = GET_IDX(heap, new_free);
            }
            list_push(heap, new_free);
            // Adjust the size of the allocated block
            header->size = new_block_size;
        }
        // Return the original block pointer as it is still valid
        return ptr;
    }

    // Allocate new block
    void *new_ptr = mem_alloc(ctx, size);
    if (new_ptr == NULL) {
        return NULL;
    }

    // We copy only the amount of data that existed in the OLD block.
    // Since we know old_payload_size < size, we copy old_payload_size.
    // This guarantees we never read past the end of the old allocated buffer.
    memcpy(new_ptr, ptr, old_payload_size);

    // Free the old block
    mem_free(ctx, ptr);

    // Return the new block pointer
    return new_ptr;
}
525
532void mem_free(mem_ctx_t ctx, void *ptr)
533{
534 heap_t *heap = (heap_t *) ctx;
535 uint8_t *block = ((uint8_t *) ptr) - ALLOC_CHUNK_HEADER_SIZE;
536 header_t *header = (header_t *) block;
537
538 // ensure size is consistent
539 ensure_chunk_valid(heap, header);
540 // if not allocated, return
541 if (!header->allocated) {
542 return;
543 }
544 // try to coalesce with adjacent physical chunks (after and before)
545 // try with next physical chunk
546 if ((block + header->size) < (uint8_t *) heap->end) {
547 header = coalesce(heap, header, (header_t *) (block + header->size));
548 }
549 // try previous chunk
550 if (header->phys_prev != 0) {
551 header = coalesce(heap, header, GET_PTR(heap, header->phys_prev));
552 }
553
554 // free chunk and add it on top of free chunk LIFO
555 list_push(heap, header);
556}
557
565void mem_parse(mem_ctx_t ctx, mem_parse_callback_t callback, void *data)
566{
567 heap_t *heap = (heap_t *) ctx;
568 header_t *header = (header_t *) (((uint8_t *) heap) + HEAP_HEADER_SIZE);
569#ifdef DEBUG_FREE_SEGMENTS
570 for (size_t i = 0; i < heap->nb_segs; i++) {
571 if (heap->free_segments[i] != NULL) {
572 header_t *head = heap->free_segments[i];
573 int nb_elemts = 0;
574 while (head) {
575 nb_elemts++;
576 head = head->fnext;
577 }
578 }
579 }
580#endif // DEBUG_FREE_SEGMENTS
581 while (header != NULL) {
582 uint8_t *block = (uint8_t *) header;
583 // break the loop if the callback returns true
584 if (callback(data, block, header->allocated, header->size)) {
585 return;
586 }
587 if (&block[header->size] < (uint8_t *) heap->end) {
588 header = (header_t *) &block[header->size];
589 }
590 else {
591 return;
592 }
593 }
594}
595
604{
605 memset(stat, 0, sizeof(mem_stat_t));
607 mem_parse(ctx, parse_callback, stat);
608}
#define GET_NEXT(_heap, _ptr)
Definition mem_alloc.c:56
#define GET_PTR(_heap, index)
Definition mem_alloc.c:53
#define ALLOC_CHUNK_HEADER_SIZE
Definition mem_alloc.c:27
void * mem_realloc(mem_ctx_t ctx, void *ptr, size_t size)
Reallocates a buffer to a new size.
Definition mem_alloc.c:447
struct header_s header_t
#define PAYLOAD_ALIGNEMENT
Definition mem_alloc.c:23
#define GET_SEGMENT(_size)
Definition mem_alloc.c:58
mem_ctx_t mem_init(void *heap_start, size_t heap_size)
initializes the heap for this allocator
Definition mem_alloc.c:323
#define NB_LINEAR_SEGMENTS
Definition mem_alloc.c:35
#define NB_SUB_SEGMENTS
Definition mem_alloc.c:50
void * mem_alloc(mem_ctx_t ctx, size_t nb_bytes)
allocates a buffer of the given size, if possible
Definition mem_alloc.c:365
#define NB_MAX_SEGMENTS
Definition mem_alloc.c:48
#define HEAP_HEADER_SIZE
Definition mem_alloc.c:31
#define FREE_CHUNK_HEADER_SIZE
Definition mem_alloc.c:29
void mem_free(mem_ctx_t ctx, void *ptr)
frees the given buffer
Definition mem_alloc.c:532
void mem_stat(mem_ctx_t *ctx, mem_stat_t *stat)
function used to get statistics (nb chunks, nb allocated chunks, total size) about the heap
Definition mem_alloc.c:603
void mem_parse(mem_ctx_t ctx, mem_parse_callback_t callback, void *data)
parse the heap
Definition mem_alloc.c:565
#define GET_PREV(_heap, _ptr)
Definition mem_alloc.c:55
#define GET_IDX(_heap, _ptr)
Definition mem_alloc.c:54
Dynamic memory allocation API.
void * mem_ctx_t
type shared externally
Definition mem_alloc.h:27
bool(* mem_parse_callback_t)(void *data, uint8_t *addr, bool allocated, size_t size)
function called for each chunk found in the heap.
Definition mem_alloc.h:40
uint16_t fprev
previous free chunk in the same segment
Definition mem_alloc.c:69
uint16_t size
Definition mem_alloc.c:64
uint16_t phys_prev
physical previous chunk
Definition mem_alloc.c:67
uint16_t fnext
next free chunk in the same segment
Definition mem_alloc.c:70
uint16_t allocated
Definition mem_alloc.c:65
void * end
end of headp buffer, for consistency check
Definition mem_alloc.c:74
size_t nb_segs
actual number of used segments
Definition mem_alloc.c:75
uint16_t free_segments[NB_MAX_SEGMENTS *NB_SUB_SEGMENTS]
Definition mem_alloc.c:76
this structure, filled by mem_stat
Definition mem_alloc.h:46
size_t allocated_size
nb bytes allocated in the heap (including headers)
Definition mem_alloc.h:50
size_t total_size
total size of the heap (including allocator internal data)
Definition mem_alloc.h:47
uint32_t nb_allocated
number of allocated chunks
Definition mem_alloc.h:52
size_t free_size
Definition mem_alloc.h:48
uint32_t nb_chunks
total number of chunks
Definition mem_alloc.h:51