Embedded SDK
Embedded SDK
Loading...
Searching...
No Matches
mem_alloc.c
Go to the documentation of this file.
1
6/*********************
7 * INCLUDES
8 *********************/
9#include <stdio.h>
10#include <string.h>
11#include "exceptions.h"
12#include "errors.h"
13#include "os_helpers.h"
14#include "os_math.h"
15#include "os_print.h"
16#include "os_utils.h"
17#include "mem_alloc.h"
18
19/*********************
20 * DEFINES
21 *********************/
// Alignment for payload data size
#define PAYLOAD_ALIGNEMENT 4
// Alignment for returned payload
#define PAYLOAD_DATA_ALIGNEMENT 8
// sizeof of header for an allocated chunk
#define ALLOC_CHUNK_HEADER_SIZE 4
// sizeof of header for a free chunk
#define FREE_CHUNK_HEADER_SIZE 8
// size of heap header, rounded up to the payload data alignment
#define HEAP_HEADER_SIZE \
    ((sizeof(heap_t) + PAYLOAD_DATA_ALIGNEMENT - 1) & ~(PAYLOAD_DATA_ALIGNEMENT - 1))

// number of 2^N segments aggregated as linear
#define NB_LINEAR_SEGMENTS 5
// 10 segments (the first one is linear) enable using up to ~32k bytes chunks, which is more than
// enough
/* Segregated free list bucket sizes:
- 0: 1-63 bytes
- 1: 64-127 bytes
- 2: 128-255 bytes
- 3: 256-511 bytes
- 4: 512-1023 bytes
- 5: 1024-2047 bytes
- 6: 2048-4095 bytes
- 7: 4096-8191 bytes
- n: 2^(n+5)-(2^(n+6)-1) bytes
- 9: 16384-32767 bytes
*/
#define NB_MAX_SEGMENTS 10
// 4 sub-segments to have a better segregation of each segments
#define NB_SUB_SEGMENTS 4

// index is basically the offset in u64 from beginning of full heap buffer
#define GET_PTR(_heap, index)  ((header_t *) (((uint8_t *) _heap) + ((index) << 3)))
#define GET_IDX(_heap, _ptr)   ((((uint8_t *) (_ptr)) - ((uint8_t *) _heap)) >> 3)
#define GET_PREV(_heap, _ptr)  ((header_t *) (((uint8_t *) _heap) + ((_ptr)->fprev << 3)))
#define GET_NEXT(_heap, _ptr)  ((header_t *) (((uint8_t *) _heap) + ((_ptr)->fnext << 3)))

// Segment index for a size: floor(log2(_size)), clamped low to the linear segment.
// Fixed: the macro previously expanded to `__builtin_clz(size)`, silently capturing a
// caller-local variable named `size` instead of using its `_size` parameter.
// NOTE: __builtin_clz is undefined for 0 — callers must pass a non-zero size.
#define GET_SEGMENT(_size) MAX(NB_LINEAR_SEGMENTS, (31 - __builtin_clz(_size)))

/**********************
 *      TYPEDEFS
 **********************/
// per-chunk header; 8 bytes, one per physical chunk in the heap
typedef struct header_s {
    uint16_t size : 15;      // chunk size in bytes (max 0x7FFF = 32767)
    uint16_t allocated : 1;  // 1 if allocated, 0 if free

    uint16_t phys_prev;  // physical previous chunk (u64 index, 0 = none)

    uint16_t fprev;  // previous free chunk in the same segment (u64 index)
    uint16_t fnext;  // next free chunk in the same segment (u64 index)
} header_t;

// heap bookkeeping, stored at the beginning of the heap buffer
typedef struct {
    void    *end;      // end of heap buffer, for consistency checks
    size_t   nb_segs;  // actual number of used segments
    uint16_t free_segments[NB_MAX_SEGMENTS * NB_SUB_SEGMENTS];  // free-list heads (u64 indexes)
} heap_t;
78
79/**********************
80 * STATIC VARIABLES
81 **********************/
82
83/**********************
84 * STATIC PROTOTYPES
85 **********************/
86
87/**********************
88 * LOCAL FUNCTIONS
89 **********************/
// This function returns the index into the array of segregated explicit free lists
// corresponding to the given size.
// For 2^n <= size < 2^(n+1), this function returns n (adjusted for the linear segment
// and scaled by NB_SUB_SEGMENTS).
// Returns -1 if the size falls beyond the segments actually used by this heap.
// NOTE: GET_SEGMENT expands code referencing the local variable `size` — keep the
// parameter name unchanged.
static inline int seglist_index(heap_t *heap, size_t size)
{
    size_t seg;
    uint8_t sub_segment;

    seg = GET_SEGMENT(size);
    if (seg == NB_LINEAR_SEGMENTS) {
        // for the aggregated segment, the sub-segment size is 1/NB_SUB_SEGMENTS of the size of the
        // segment (16-byte buckets for sizes below 64)
        sub_segment = (size >> (seg - 1)) & 0x3;
    }
    else {
        // sub segment size is 1/(NB_SUB_SEGMENTS*2) of the size of the segment
        // (the two bits just below the most-significant bit select the sub-segment)
        sub_segment = (size >> (seg - 2)) & 0x3;
    }
    // from size in [0 : 63[, segment is 0 to 5 but is forced to 0
    // from size in [2^6 : 2^13[, segment is [6 : 12] but is forced to [1 : 7]
    seg -= NB_LINEAR_SEGMENTS;
    // check consistency (seg cannot underflow: GET_SEGMENT clamps to NB_LINEAR_SEGMENTS)
    if (seg >= heap->nb_segs) {
        return -1;
    }
    return seg * NB_SUB_SEGMENTS + sub_segment;
}
117
// This function returns the index into the array of segregated explicit free lists
// corresponding to the given size, being sure to be big enough (so rounded to next sub-segment).
// Any free chunk found in the returned (or any higher) sub-segment is guaranteed to hold `size`.
// Returns -1 if the rounded size falls beyond the segments actually used by this heap.
// NOTE: GET_SEGMENT expands code referencing the local variable `size` — keep the
// parameter name unchanged.
static inline int seglist_index_up(heap_t *heap, size_t size)
{
    size_t seg;
    uint8_t sub_segment;
    size_t sub_segment_size;

    seg = GET_SEGMENT(size);
    if (seg == NB_LINEAR_SEGMENTS) {
        // for the aggregated segment, the sub-segment size is 1/NB_SUB_SEGMENTS of the size of the
        // segment
        sub_segment_size = (1 << (seg - 1));
    }
    else {
        // sub segment size is 1/(NB_SUB_SEGMENTS*2) of the size of the segment
        sub_segment_size = (1 << (seg - 2));
    }

    // round size to next sub-segment boundary (sub_segment_size is a power of two)
    size += sub_segment_size - 1;
    size &= ~(sub_segment_size - 1);

    // recompute the segment: the rounding may have pushed size into the next segment
    seg = GET_SEGMENT(size);
    if (seg == NB_LINEAR_SEGMENTS) {
        // for the aggregated segment, the sub-segment size is 1/NB_SUB_SEGMENTS of the size of the
        // segment
        sub_segment = (size >> (seg - 1)) & 0x3;
    }
    else {
        // sub segment size is 1/(NB_SUB_SEGMENTS*2) of the size of the segment
        sub_segment = (size >> (seg - 2)) & 0x3;
    }

    // from size in [0 : 63[, segment is 0 to 5 but is forced to 0
    // from size in [2^6 : 2^13[, segment is [6 : 12] but is forced to [1 : 7]
    seg -= NB_LINEAR_SEGMENTS;

    // check consistency (seg cannot underflow: GET_SEGMENT clamps to NB_LINEAR_SEGMENTS)
    if (seg >= heap->nb_segs) {
        return -1;
    }
    return seg * NB_SUB_SEGMENTS + sub_segment;
}
162
163// remove item from linked list
164static inline void list_remove(heap_t *heap, uint16_t *first_free, uint16_t elem)
165{
166 header_t *elem_ptr = GET_PTR(heap, elem);
167 // if was the first, set a new first
168 if (*first_free == elem) {
169 *first_free = elem_ptr->fnext;
170 // if the current first was not alone
171 if (*first_free) {
172 GET_PTR(heap, *first_free)->fprev = 0;
173 }
174 return;
175 }
176 // link previous to following, if existing previous
177 if (elem_ptr->fprev) {
178 GET_PREV(heap, elem_ptr)->fnext = elem_ptr->fnext;
179 }
180 // link following to previous, if existing following
181 if (elem_ptr->fnext) {
182 GET_NEXT(heap, elem_ptr)->fprev = elem_ptr->fprev;
183 }
184}
185
186// add item in LIFO
187static inline void list_push(heap_t *heap, header_t *header)
188{
189 int seg_index = seglist_index(heap, header->size);
190 uint16_t first_idx = heap->free_segments[seg_index];
191 if (seg_index < 0) {
192 return;
193 }
194 // if it's already the first item, nothing to do
195 if (first_idx == GET_IDX(heap, header)) {
196 return;
197 }
198 // link this new first to following (previous first)
199 header->fnext = first_idx;
200 header->fprev = 0;
201 header->allocated = 0;
202 // link following (previous first) to new first
203 if (first_idx != 0) {
204 header_t *first = GET_PTR(heap, first_idx);
205 first->fprev = GET_IDX(heap, header);
206 }
207 // replace new first
208 heap->free_segments[seg_index] = GET_IDX(heap, header);
209}
210
// ensure the chunk header is consistent; throws EXCEPTION_CORRUPT (non-local exit)
// on any inconsistency, so this function only returns on success
static inline void ensure_chunk_valid(heap_t *heap, header_t *header)
{
    uint8_t *block = (uint8_t *) header;
    // ensure size is valid: multiple of PAYLOAD_ALIGNEMENT, at least the size of a
    // free chunk header, and not running past the end of the heap buffer
    if ((header->size & (PAYLOAD_ALIGNEMENT - 1)) || (header->size < FREE_CHUNK_HEADER_SIZE)
        || ((block + header->size) > (uint8_t *) heap->end)) {
        PRINTF("invalid size: header->size = %d!\n", header->size);
        THROW(EXCEPTION_CORRUPT);
    }
    // ensure phys_prev is valid (0 means no physical predecessor)
    if (header->phys_prev != 0) {
        header_t *prev = GET_PTR(heap, header->phys_prev);
        block = (uint8_t *) heap;
        // the previous chunk must lie inside the heap buffer, after the heap header
        if (((uint8_t *) prev < &block[HEAP_HEADER_SIZE]) || ((void *) prev > heap->end)) {
            PRINTF("corrupted prev_chunk\n");
            THROW(EXCEPTION_CORRUPT);
        }
        // ensure that next of prev is current (i.e. the two chunks are physically adjacent)
        if ((((uint8_t *) prev) + prev->size) != (uint8_t *) header) {
            PRINTF("corrupted prev_chunk\n");
            THROW(EXCEPTION_CORRUPT);
        }
    }
}
236
// coalesce the two given physically-adjacent chunks into one, if the neighbour is free.
// `neighbour` may be located before or after `header` in memory; the merged chunk
// keeps the lower address. Returns the resulting chunk header (unchanged `header`
// when no merge happened).
static header_t *coalesce(heap_t *heap, header_t *header, header_t *neighbour)
{
    uint16_t neighbour_idx = GET_IDX(heap, neighbour);
    ensure_chunk_valid(heap, neighbour);

    // if not allocated, coalesce
    if (!neighbour->allocated) {
        // remove this neighbour from its free list
        list_remove(
            heap, &heap->free_segments[seglist_index(heap, neighbour->size)], neighbour_idx);
        // link the current next physical chunk (if existing) to this new chunk
        header_t *next;
        if (header < neighbour) {
            // neighbour is the higher chunk: absorb it into header
            next = (header_t *) (((uint8_t *) neighbour) + neighbour->size);
            if ((void *) next < heap->end) {
                next->phys_prev = GET_IDX(heap, header);
            }
            header->size += neighbour->size;
            // clean-up neighbour to avoid leaking
            memset(neighbour, 0, sizeof(*neighbour));
        }
        else {
            // neighbour is the lower chunk: absorb header into neighbour
            next = (header_t *) (((uint8_t *) header) + header->size);
            // ensure next is valid (inside the heap buffer) before linking it
            if ((void *) next < heap->end) {
                next->phys_prev = neighbour_idx;
            }
            neighbour->size += header->size;
            // clean-up header to avoid leaking
            memset(header, 0, sizeof(*header));
            // header points now to neighbour
            header = neighbour;
        }
    }
    return header;
}
274
275static bool parse_callback(void *data, uint8_t *addr, bool allocated, size_t size)
276{
277 mem_stat_t *stat = (mem_stat_t *) data;
278
279 UNUSED(addr);
280 // error reached
281 if (size == 0) {
282 return true;
283 }
284 stat->nb_chunks++;
285 if (allocated) {
286 stat->nb_allocated++;
287 stat->allocated_size += size;
288 }
289 else {
290 stat->free_size += size;
291 }
292 stat->total_size += size;
293
294 return false;
295}
296
297/**********************
298 * GLOBAL FUNCTIONS
299 **********************/
300
/**
 * @brief initializes the heap for this allocator
 *
 * @param heap_start start address of the buffer to manage (used in place, also holds
 *                   the allocator bookkeeping)
 * @param heap_size size in bytes of the buffer; must be a multiple of 8, at least
 *                  200 bytes, and at most 0x7FF8 + HEAP_HEADER_SIZE
 * @return context to pass to the other mem_* functions, or NULL on invalid size
 */
mem_ctx_t mem_init(void *heap_start, size_t heap_size)
{
    heap_t *heap = (heap_t *) heap_start;

    // size must be a multiple of 8, and at least 200 bytes
    // (upper bound keeps chunk sizes within the 15-bit size field)
    if ((heap_size & (FREE_CHUNK_HEADER_SIZE - 1)) || (heap_size < 200)
        || (heap_size > (0x7FF8 + HEAP_HEADER_SIZE))) {
        return NULL;
    }

    heap->end = ((uint8_t *) heap_start) + heap_size;

    // compute number of segments: log2 of the usable size, minus the aggregated
    // linear segments, plus one
    heap->nb_segs = 31 - __builtin_clz(heap_size - HEAP_HEADER_SIZE) - NB_LINEAR_SEGMENTS + 1;
    if (heap->nb_segs > NB_MAX_SEGMENTS) {
        return NULL;
    }
    memset(heap->free_segments, 0, heap->nb_segs * NB_SUB_SEGMENTS * sizeof(uint16_t));

    // initiate free chunk LIFO with the whole heap as a free chunk
    // (list_push clears the allocated bit and sets fprev/fnext)
    header_t *first_free = (header_t *) (((uint8_t *) heap) + HEAP_HEADER_SIZE);
    first_free->size = heap_size - HEAP_HEADER_SIZE;
    first_free->phys_prev = 0;
    list_push(heap, first_free);

    return (mem_ctx_t) heap_start;
}
336
346void *mem_alloc(mem_ctx_t ctx, size_t nb_bytes)
347{
348 heap_t *heap = (heap_t *) ctx;
349 int seg;
350
351 // nb_bytes must be > 0
352 if (nb_bytes == 0) {
353 return NULL;
354 }
355
356 // Adjust size to include header and satisfy alignment requirements, etc.
357 nb_bytes += PAYLOAD_ALIGNEMENT - 1;
358 nb_bytes &= ~(PAYLOAD_ALIGNEMENT - 1);
359
360 // if nb_bytes = 8N + 4, just add 4 for header
361 if (nb_bytes & PAYLOAD_ALIGNEMENT) {
362 nb_bytes += ALLOC_CHUNK_HEADER_SIZE; // for header (size + prev_chunk)
363 }
364 else {
365 // if nb_bytes = 8N, add 8 for header + unused footer
367 }
368
369 // get the segment sure to be holding this size
370 // if nb_bytes == 2^n , all chunks in [2^n: 2^(n+1)[ are ok
371 // if nb_bytes > 2^n , all chunks in [2^(n+1): 2^(n+2)[ are ok
372 seg = seglist_index_up(heap, nb_bytes);
373 if (seg < 0) {
374 return NULL;
375 }
376 // Loop through all increasing size segments
377 while ((size_t) seg < (heap->nb_segs * NB_SUB_SEGMENTS)) {
378 uint16_t chunk_idx = heap->free_segments[seg];
379 // if this segment is not empty, it must contain a big-enough chunk
380 if (chunk_idx != 0) {
381 header_t *header = GET_PTR(heap, chunk_idx);
382
383 // ensure chunk is consistent
384 ensure_chunk_valid(heap, header);
385 // this block shall always be big enough
386 if (nb_bytes > header->size) {
387 // probably a big issue, maybe necessary to throw an exception
388 return NULL;
389 }
390 uint8_t *block = (uint8_t *) header;
391
392 // remove the block from the segregated list
393 list_remove(heap, &heap->free_segments[seg], chunk_idx);
394 // We could turn the excess bytes into a new free block
395 // the minimum size is the size of an empty chunk
396 if (header->size >= (nb_bytes + FREE_CHUNK_HEADER_SIZE)) {
397 header_t *new_free = (header_t *) &block[nb_bytes];
398 new_free->size = header->size - nb_bytes;
399 // link this new chunk to previous (found) one
400 new_free->phys_prev = chunk_idx;
401 // link the current next physical chunk to this new chunk (if existing)
402 header_t *next = (header_t *) &block[header->size];
403 if ((void *) next < heap->end) {
404 next->phys_prev = GET_IDX(heap, new_free);
405 }
406 list_push(heap, new_free);
407 // change size only if new chunk is created
408 header->size = nb_bytes;
409 }
410
411 // Update the chunk's header to set the allocated bit
412 header->allocated = 0x1;
413
414 // clean-up previous empty header info
415 header->fnext = header->fprev = 0;
416
417 // Return a pointer to the payload
418 return block + ALLOC_CHUNK_HEADER_SIZE;
419 }
420 // not found in current segment, let's try in bigger segment
421 seg++;
422 }
423 return NULL;
424}
425
/**
 * @brief frees the given buffer, previously returned by @ref mem_alloc
 *
 * Coalesces the released chunk with its free physical neighbours (next, then
 * previous) before pushing it back on the matching free list.
 * Throws EXCEPTION_CORRUPT (via ensure_chunk_valid) if the chunk is inconsistent.
 *
 * @param ctx heap context, as returned by @ref mem_init
 * @param ptr payload pointer to release; freeing an already-free chunk is a no-op
 */
void mem_free(mem_ctx_t ctx, void *ptr)
{
    heap_t *heap = (heap_t *) ctx;
    // the chunk header sits ALLOC_CHUNK_HEADER_SIZE bytes before the payload
    uint8_t *block = ((uint8_t *) ptr) - ALLOC_CHUNK_HEADER_SIZE;
    header_t *header = (header_t *) block;

    // ensure size is consistent
    ensure_chunk_valid(heap, header);
    // if not allocated, return
    if (!header->allocated) {
        return;
    }
    // try to coalesce with adjacent physical chunks (after and before)
    // try with next physical chunk (only if it exists inside the heap buffer)
    if ((block + header->size) < (uint8_t *) heap->end) {
        header = coalesce(heap, header, (header_t *) (block + header->size));
    }
    // try previous chunk (coalesce may have moved header to a lower address)
    if (header->phys_prev != 0) {
        header = coalesce(heap, header, GET_PTR(heap, header->phys_prev));
    }

    // free chunk and add it on top of free chunk LIFO
    list_push(heap, header);
}
457
465void mem_parse(mem_ctx_t ctx, mem_parse_callback_t callback, void *data)
466{
467 heap_t *heap = (heap_t *) ctx;
468 header_t *header = (header_t *) (((uint8_t *) heap) + HEAP_HEADER_SIZE);
469#ifdef DEBUG_FREE_SEGMENTS
470 for (size_t i = 0; i < heap->nb_segs; i++) {
471 if (heap->free_segments[i] != NULL) {
472 header_t *head = heap->free_segments[i];
473 int nb_elemts = 0;
474 while (head) {
475 nb_elemts++;
476 head = head->fnext;
477 }
478 }
479 }
480#endif // DEBUG_FREE_SEGMENTS
481 while (header != NULL) {
482 uint8_t *block = (uint8_t *) header;
483 // break the loop if the callback returns true
484 if (callback(data, block, header->allocated, header->size)) {
485 return;
486 }
487 if (&block[header->size] < (uint8_t *) heap->end) {
488 header = (header_t *) &block[header->size];
489 }
490 else {
491 return;
492 }
493 }
494}
495
504{
505 memset(stat, 0, sizeof(mem_stat_t));
507 mem_parse(ctx, parse_callback, stat);
508}
#define GET_NEXT(_heap, _ptr)
Definition mem_alloc.c:56
#define GET_PTR(_heap, index)
Definition mem_alloc.c:53
#define ALLOC_CHUNK_HEADER_SIZE
Definition mem_alloc.c:27
struct header_s header_t
#define PAYLOAD_ALIGNEMENT
Definition mem_alloc.c:23
#define GET_SEGMENT(_size)
Definition mem_alloc.c:58
mem_ctx_t mem_init(void *heap_start, size_t heap_size)
initializes the heap for this allocator
Definition mem_alloc.c:309
#define NB_LINEAR_SEGMENTS
Definition mem_alloc.c:35
#define NB_SUB_SEGMENTS
Definition mem_alloc.c:50
void * mem_alloc(mem_ctx_t ctx, size_t nb_bytes)
allocates a buffer of the given size, if possible
Definition mem_alloc.c:346
#define NB_MAX_SEGMENTS
Definition mem_alloc.c:48
#define HEAP_HEADER_SIZE
Definition mem_alloc.c:31
#define FREE_CHUNK_HEADER_SIZE
Definition mem_alloc.c:29
void mem_free(mem_ctx_t ctx, void *ptr)
frees the given buffer
Definition mem_alloc.c:432
void mem_stat(mem_ctx_t *ctx, mem_stat_t *stat)
function used to get statistics (nb chunks, nb allocated chunks, total size) about the heap
Definition mem_alloc.c:503
void mem_parse(mem_ctx_t ctx, mem_parse_callback_t callback, void *data)
parse the heap
Definition mem_alloc.c:465
#define GET_PREV(_heap, _ptr)
Definition mem_alloc.c:55
#define GET_IDX(_heap, _ptr)
Definition mem_alloc.c:54
Dynamic memory allocation API.
void * mem_ctx_t
type shared externally
Definition mem_alloc.h:27
bool(* mem_parse_callback_t)(void *data, uint8_t *addr, bool allocated, size_t size)
function called for each chunk found in the heap.
Definition mem_alloc.h:40
uint16_t fprev
previous free chunk in the same segment
Definition mem_alloc.c:69
uint16_t size
Definition mem_alloc.c:64
uint16_t phys_prev
physical previous chunk
Definition mem_alloc.c:67
uint16_t fnext
next free chunk in the same segment
Definition mem_alloc.c:70
uint16_t allocated
Definition mem_alloc.c:65
void * end
end of headp buffer, for consistency check
Definition mem_alloc.c:74
size_t nb_segs
actual number of used segments
Definition mem_alloc.c:75
uint16_t free_segments[NB_MAX_SEGMENTS *NB_SUB_SEGMENTS]
Definition mem_alloc.c:76
this structure, filled by mem_stat
Definition mem_alloc.h:46
size_t allocated_size
nb bytes allocated in the heap (including headers)
Definition mem_alloc.h:50
size_t total_size
total size of the heap (including allocator internal data)
Definition mem_alloc.h:47
uint32_t nb_allocated
number of allocated chunks
Definition mem_alloc.h:52
size_t free_size
Definition mem_alloc.h:48
uint32_t nb_chunks
total number of chunks
Definition mem_alloc.h:51