Embedded SDK
Embedded SDK
Loading...
Searching...
No Matches
mem_alloc.c
Go to the documentation of this file.
1
6/*********************
7 * INCLUDES
8 *********************/
9#include <stdio.h>
10#include <string.h>
11#include "exceptions.h"
12#include "errors.h"
13#include "os_helpers.h"
14#include "os_math.h"
15#include "os_print.h"
16#include "os_utils.h"
17#include "mem_alloc.h"
18
19/*********************
20 * DEFINES
21 *********************/
22// Alignment for payload data size
23#define PAYLOAD_ALIGNEMENT 4
24// Alignment for returned payload
25#define PAYLOAD_DATA_ALIGNEMENT 8
// size of the header of an allocated chunk
27#define ALLOC_CHUNK_HEADER_SIZE 4
28// sizeof of header for a free chunk
29#define FREE_CHUNK_HEADER_SIZE 8
30// size of heap header
31#define HEAP_HEADER_SIZE \
32 ((sizeof(heap_t) + PAYLOAD_DATA_ALIGNEMENT - 1) & ~(PAYLOAD_DATA_ALIGNEMENT - 1))
33
34// number of 2^N segments aggregated as linear
35#define NB_LINEAR_SEGMENTS 5
36// 10 segments (the first one is linear) enable using up to ~32k bytes chunks, which is more than
37// enough
/* Segregated free list bucket sizes:
- 0: 1-63 bytes
- 1: 64-127 bytes
- 2: 128-255 bytes
- 3: 256-511 bytes
- 4: 512-1023 bytes
- n: 2^(n+5) to (2^(n+6))-1 bytes
- 9: 16384-32767 bytes
*/
48#define NB_MAX_SEGMENTS 10
49// 4 sub-segments to have a better segregation of each segments
50#define NB_SUB_SEGMENTS 4
51
52// index is basically the offset in u64 from beginning of full heap buffer
53#define GET_PTR(_heap, index) ((header_t *) (((uint8_t *) _heap) + ((index) << 3)))
54#define GET_IDX(_heap, _ptr) ((((uint8_t *) (_ptr)) - ((uint8_t *) _heap)) >> 3)
55#define GET_PREV(_heap, _ptr) ((header_t *) (((uint8_t *) _heap) + ((_ptr)->fprev << 3)))
56#define GET_NEXT(_heap, _ptr) ((header_t *) (((uint8_t *) _heap) + ((_ptr)->fnext << 3)))
57
58/**********************
59 * TYPEDEFS
60 **********************/
61typedef struct header_s {
62 uint16_t size : 15; // the biggest size is 0x7FFF = 0x32767
64
66
70
71typedef struct {
72 void *end;
73 size_t nb_segs;
75} heap_t;
76
77/**********************
78 * STATIC VARIABLES
79 **********************/
80
81/**********************
82 * STATIC PROTOTYPES
83 **********************/
84
85/**********************
86 * LOCAL FUNCTIONS
87 **********************/
88// This function returns the index into the array segregated explicit free lists
89// corresponding to the given size.
90// For 2^n <= size < 2^(n+1), this function returns n.
// This function returns the index into the array of segregated explicit free lists
// corresponding to the given size.
// For 2^n <= size < 2^(n+1), this function returns n (offset by the linear segments,
// then scaled by the number of sub-segments).
// Returns -1 when the size maps beyond the number of segments of this heap.
// NOTE(review): assumes size > 0 — __builtin_clz(0) is undefined behavior; callers
// only pass non-zero chunk sizes — confirm.
static inline int seglist_index(heap_t *heap, size_t size)
{
    size_t seg;
    uint8_t sub_segment;

    // log2(size), clamped so that all sizes below 2^NB_LINEAR_SEGMENTS land in segment 0
    seg = MAX(NB_LINEAR_SEGMENTS, (31 - __builtin_clz(size)));
    // bits (seg) and (seg-1) of size select one of the NB_SUB_SEGMENTS sub-lists
    sub_segment = (size >> (seg - 1)) & 0x3;
    // from size in [0 : 63[, segment is 0 to 5 but is forced to 0
    // from size in [2^6 : 2^13[, segment is [6 : 12] but is forced to [1 : 7]
    seg -= NB_LINEAR_SEGMENTS;
    // check consistency
    if (seg >= heap->nb_segs) {
        return -1;
    }
    return seg * NB_SUB_SEGMENTS + sub_segment;
}
107
108// This function returns the index into the array segregated explicit free lists
109// corresponding to the given size, being sure to be big enough (so rounded to next sub-segment).
// This function returns the index into the array of segregated explicit free lists
// corresponding to the given size, being sure to be big enough (so rounded to next sub-segment):
// any non-empty list at (or above) the returned index only contains chunks >= size.
// Returns -1 when the rounded size maps beyond the number of segments of this heap.
// NOTE(review): assumes size > 0 — __builtin_clz(0) is undefined behavior — confirm callers.
static inline int seglist_index_up(heap_t *heap, size_t size)
{
    size_t seg;
    uint8_t sub_segment;
    size_t sub_segment_size;

    // log2(size), clamped to the linear segments floor
    seg = MAX(NB_LINEAR_SEGMENTS, (31 - __builtin_clz(size)));
    // width of one sub-segment at this magnitude
    sub_segment_size = (1 << (seg - 1));

    // round size to next sub-segment
    size += sub_segment_size - 1;
    size &= ~(sub_segment_size - 1);

    // recompute segment/sub-segment: rounding may have pushed size into the next segment
    seg = MAX(NB_LINEAR_SEGMENTS, (31 - __builtin_clz(size)));
    sub_segment = (size >> (seg - 1)) & 0x3;

    // from size in [0 : 63[, segment is 0 to 5 but is forced to 0
    // from size in [2^6 : 2^13[, segment is [6 : 12] but is forced to [1 : 7]
    seg -= NB_LINEAR_SEGMENTS;

    // check consistency
    if (seg >= heap->nb_segs) {
        return -1;
    }
    return seg * NB_SUB_SEGMENTS + sub_segment;
}
136
137// remove item from linked list
138static inline void list_remove(heap_t *heap, uint16_t *first_free, uint16_t elem)
139{
140 header_t *elem_ptr = GET_PTR(heap, elem);
141 // if was the first, set a new first
142 if (*first_free == elem) {
143 *first_free = elem_ptr->fnext;
144 // if the current first was not alone
145 if (*first_free) {
146 GET_PTR(heap, *first_free)->fprev = 0;
147 }
148 return;
149 }
150 // link previous to following, if existing previous
151 if (elem_ptr->fprev) {
152 GET_PREV(heap, elem_ptr)->fnext = elem_ptr->fnext;
153 }
154 // link following to previous, if existing following
155 if (elem_ptr->fnext) {
156 GET_NEXT(heap, elem_ptr)->fprev = elem_ptr->fprev;
157 }
158}
159
160// add item in LIFO
161static inline void list_push(heap_t *heap, header_t *header)
162{
163 int seg_index = seglist_index(heap, header->size);
164 uint16_t first_idx = heap->free_segments[seg_index];
165 if (seg_index < 0) {
166 return;
167 }
168 // if it's already the first item, nothing to do
169 if (first_idx == GET_IDX(heap, header)) {
170 return;
171 }
172 // link this new first to following (previous first)
173 header->fnext = first_idx;
174 header->fprev = 0;
175 header->allocated = 0;
176 // link following (previous first) to new first
177 if (first_idx != 0) {
178 header_t *first = GET_PTR(heap, first_idx);
179 first->fprev = GET_IDX(heap, header);
180 }
181 // replace new first
182 heap->free_segments[seg_index] = GET_IDX(heap, header);
183}
184
185// ensure the chunk is valid
// Ensure the chunk is valid; throws EXCEPTION_CORRUPT on any inconsistency.
static inline void ensure_chunk_valid(heap_t *heap, header_t *header)
{
    uint8_t *block = (uint8_t *) header;
    // ensure size is aligned on PAYLOAD_ALIGNEMENT, is at least the size of a
    // free-chunk header, and does not run past the end of the heap buffer
    if ((header->size & (PAYLOAD_ALIGNEMENT - 1)) || (header->size < FREE_CHUNK_HEADER_SIZE)
        || ((block + header->size) > (uint8_t *) heap->end)) {
        PRINTF("invalid size: header->size = %d!\n", header->size);
        THROW(EXCEPTION_CORRUPT);
    }
    // ensure phys_prev is valid (0 means no physical predecessor)
    if (header->phys_prev != 0) {
        header_t *prev = GET_PTR(heap, header->phys_prev);
        // previous chunk must lie inside the heap buffer, after the heap header
        block = (uint8_t *) heap;
        if (((uint8_t *) prev < &block[HEAP_HEADER_SIZE]) || ((void *) prev > heap->end)) {
            PRINTF("corrupted prev_chunk\n");
            THROW(EXCEPTION_CORRUPT);
        }
        // ensure that next of prev is current (physical chunks are contiguous)
        if ((((uint8_t *) prev) + prev->size) != (uint8_t *) header) {
            PRINTF("corrupted prev_chunk\n");
            THROW(EXCEPTION_CORRUPT);
        }
    }
}
210
211// coalesce the two given chunks
// Coalesce the two given physically-adjacent chunks, if the neighbour is free.
// 'neighbour' may be either before or after 'header' in memory; the merged chunk
// keeps the lower address. Returns the (possibly moved) chunk header.
// Throws EXCEPTION_CORRUPT (via ensure_chunk_valid) if the neighbour is inconsistent.
static header_t *coalesce(heap_t *heap, header_t *header, header_t *neighbour)
{
    uint16_t neighbour_idx = GET_IDX(heap, neighbour);
    ensure_chunk_valid(heap, neighbour);

    // if not allocated, coalesce
    if (!neighbour->allocated) {
        // remove this neighbour from its free list
        list_remove(
            heap, &heap->free_segments[seglist_index(heap, neighbour->size)], neighbour_idx);
        // link the current next physical chunk (if existing) to this new chunk
        header_t *next;
        if (header < neighbour) {
            // neighbour is after header: header absorbs neighbour
            next = (header_t *) (((uint8_t *) neighbour) + neighbour->size);
            // ensure next is valid (inside the heap buffer) before linking it
            if ((void *) next < heap->end) {
                next->phys_prev = GET_IDX(heap, header);
            }
            header->size += neighbour->size;
            // clean-up neighbour to avoid leaking
            memset(neighbour, 0, sizeof(*neighbour));
        }
        else {
            // neighbour is before header: neighbour absorbs header
            next = (header_t *) (((uint8_t *) header) + header->size);
            // ensure next is valid (inside the heap buffer) before linking it
            if ((void *) next < heap->end) {
                next->phys_prev = neighbour_idx;
            }
            neighbour->size += header->size;
            // clean-up header to avoid leaking
            memset(header, 0, sizeof(*header));
            // header points now to neighbour
            header = neighbour;
        }
    }
    return header;
}
248
249static bool parse_callback(void *data, uint8_t *addr, bool allocated, size_t size)
250{
251 mem_stat_t *stat = (mem_stat_t *) data;
252
253 UNUSED(addr);
254 // error reached
255 if (size == 0) {
256 return true;
257 }
258 stat->nb_chunks++;
259 if (allocated) {
260 stat->nb_allocated++;
261 stat->allocated_size += size;
262 }
263 else {
264 stat->free_size += size;
265 }
266 stat->total_size += size;
267
268 return false;
269}
270
271/**********************
272 * GLOBAL FUNCTIONS
273 **********************/
274
/**
 * @brief initializes the heap for this allocator
 *
 * @param heap_start start of the buffer to use as heap
 *        (NOTE(review): the buffer's own alignment is not checked here — the
 *        index macros assume 8-byte alignment, confirm callers)
 * @param heap_size size in bytes of the heap buffer; must be a multiple of 8,
 *        at least 200 bytes and at most 0x7FF8 + HEAP_HEADER_SIZE
 *        (the chunk size field is 15 bits wide)
 * @return the context to pass to mem_alloc()/mem_free(), or NULL on invalid size
 */
mem_ctx_t mem_init(void *heap_start, size_t heap_size)
{
    heap_t *heap = (heap_t *) heap_start;

    // size must be a multiple of 8, and at least 200 bytes
    if ((heap_size & (FREE_CHUNK_HEADER_SIZE - 1)) || (heap_size < 200)
        || (heap_size > (0x7FF8 + HEAP_HEADER_SIZE))) {
        return NULL;
    }

    heap->end = ((uint8_t *) heap_start) + heap_size;

    // compute number of segments: log2 of the usable size, with the first
    // NB_LINEAR_SEGMENTS powers of two aggregated into a single segment
    heap->nb_segs = 31 - __builtin_clz(heap_size - HEAP_HEADER_SIZE) - NB_LINEAR_SEGMENTS + 1;
    if (heap->nb_segs > NB_MAX_SEGMENTS) {
        return NULL;
    }
    // empty all used free lists (index 0 is the "no chunk" sentinel)
    memset(heap->free_segments, 0, heap->nb_segs * NB_SUB_SEGMENTS * sizeof(uint16_t));

    // initiate free chunk LIFO with the whole heap as a free chunk
    header_t *first_free = (header_t *) (((uint8_t *) heap) + HEAP_HEADER_SIZE);
    first_free->size = heap_size - HEAP_HEADER_SIZE;
    first_free->phys_prev = 0;
    list_push(heap, first_free);

    return (mem_ctx_t) heap_start;
}
310
/**
 * @brief allocates a buffer of the given size, if possible
 *
 * @param ctx heap context, as returned by mem_init()
 * @param nb_bytes requested payload size in bytes (must be > 0)
 * @return pointer to the payload of the allocated chunk, or NULL if no
 *         big-enough free chunk exists (or on invalid/oversized request)
 */
void *mem_alloc(mem_ctx_t ctx, size_t nb_bytes)
{
    heap_t *heap = (heap_t *) ctx;
    int seg;

    // nb_bytes must be > 0
    if (nb_bytes == 0) {
        return NULL;
    }

    // Adjust size to include header and satisfy alignment requirements, etc.
    nb_bytes += PAYLOAD_ALIGNEMENT - 1;
    nb_bytes &= ~(PAYLOAD_ALIGNEMENT - 1);

    // if nb_bytes = 8N + 4, just add 4 for header
    if (nb_bytes & PAYLOAD_ALIGNEMENT) {
        nb_bytes += ALLOC_CHUNK_HEADER_SIZE;  // for header (size + prev_chunk)
    }
    else {
        // if nb_bytes = 8N, add 8 for header + unused footer
        // NOTE(review): this branch contains no statement; the comment above
        // suggests `nb_bytes += FREE_CHUNK_HEADER_SIZE;` was intended — a line
        // may be missing from this copy of the file, confirm against the
        // original source.
    }

    // get the segment sure to be holding this size
    // if nb_bytes == 2^n , all chunks in [2^n: 2^(n+1)[ are ok
    // if nb_bytes > 2^n , all chunks in [2^(n+1): 2^(n+2)[ are ok
    seg = seglist_index_up(heap, nb_bytes);
    if (seg < 0) {
        return NULL;
    }
    // Loop through all increasing size segments
    while ((size_t) seg < (heap->nb_segs * NB_SUB_SEGMENTS)) {
        uint16_t chunk_idx = heap->free_segments[seg];
        // if this segment is not empty, it must contain a big-enough chunk
        if (chunk_idx != 0) {
            header_t *header = GET_PTR(heap, chunk_idx);

            // ensure chunk is consistent
            ensure_chunk_valid(heap, header);
            // this block shall always be big enough
            if (nb_bytes > header->size) {
                // probably a big issue, maybe necessary to throw an exception
                return NULL;
            }
            uint8_t *block = (uint8_t *) header;

            // remove the block from the segregated list
            list_remove(heap, &heap->free_segments[seg], chunk_idx);
            // We could turn the excess bytes into a new free block
            // the minimum size is the size of an empty chunk
            if (header->size >= (nb_bytes + FREE_CHUNK_HEADER_SIZE)) {
                // split: the tail of this chunk becomes a new free chunk
                header_t *new_free = (header_t *) &block[nb_bytes];
                new_free->size = header->size - nb_bytes;
                // link this new chunk to previous (found) one
                new_free->phys_prev = chunk_idx;
                // link the current next physical chunk to this new chunk (if existing)
                header_t *next = (header_t *) &block[header->size];
                if ((void *) next < heap->end) {
                    next->phys_prev = GET_IDX(heap, new_free);
                }
                list_push(heap, new_free);
                // change size only if new chunk is created
                header->size = nb_bytes;
            }

            // Update the chunk's header to set the allocated bit
            header->allocated = 0x1;

            // clean-up previous empty header info
            header->fnext = header->fprev = 0;

            // Return a pointer to the payload
            return block + ALLOC_CHUNK_HEADER_SIZE;
        }
        // not found in current segment, let's try in bigger segment
        seg++;
    }
    return NULL;
}
399
/**
 * @brief frees the given buffer, previously allocated with mem_alloc()
 *
 * The chunk is coalesced with its free physical neighbours (next then previous)
 * before being pushed back on the matching free list.
 * Freeing an already-free chunk is a no-op; a corrupted chunk throws
 * EXCEPTION_CORRUPT (via ensure_chunk_valid).
 *
 * @param ctx heap context, as returned by mem_init()
 * @param ptr payload pointer returned by mem_alloc()
 */
void mem_free(mem_ctx_t ctx, void *ptr)
{
    heap_t *heap = (heap_t *) ctx;
    // the chunk header sits ALLOC_CHUNK_HEADER_SIZE bytes before the payload
    uint8_t *block = ((uint8_t *) ptr) - ALLOC_CHUNK_HEADER_SIZE;
    header_t *header = (header_t *) block;

    // ensure size is consistent
    ensure_chunk_valid(heap, header);
    // if not allocated, return (makes double-free harmless)
    if (!header->allocated) {
        return;
    }
    // try to coalesce with adjacent physical chunks (after and before)
    // try with next physical chunk
    if ((block + header->size) < (uint8_t *) heap->end) {
        header = coalesce(heap, header, (header_t *) (block + header->size));
    }
    // try previous chunk (coalesce may have moved 'header' backwards already)
    if (header->phys_prev != 0) {
        header = coalesce(heap, header, GET_PTR(heap, header->phys_prev));
    }

    // free chunk and add it on top of free chunk LIFO
    list_push(heap, header);
}
431
439void mem_parse(mem_ctx_t ctx, mem_parse_callback_t callback, void *data)
440{
441 heap_t *heap = (heap_t *) ctx;
442 header_t *header = (header_t *) (((uint8_t *) heap) + HEAP_HEADER_SIZE);
443#ifdef DEBUG_FREE_SEGMENTS
444 for (size_t i = 0; i < heap->nb_segs; i++) {
445 if (heap->free_segments[i] != NULL) {
446 header_t *head = heap->free_segments[i];
447 int nb_elemts = 0;
448 while (head) {
449 nb_elemts++;
450 head = head->fnext;
451 }
452 }
453 }
454#endif // DEBUG_FREE_SEGMENTS
455 while (header != NULL) {
456 uint8_t *block = (uint8_t *) header;
457 // break the loop if the callback returns true
458 if (callback(data, block, header->allocated, header->size)) {
459 return;
460 }
461 if (&block[header->size] < (uint8_t *) heap->end) {
462 header = (header_t *) &block[header->size];
463 }
464 else {
465 return;
466 }
467 }
468}
469
478{
479 memset(stat, 0, sizeof(mem_stat_t));
481 mem_parse(ctx, parse_callback, stat);
482}
#define GET_NEXT(_heap, _ptr)
Definition mem_alloc.c:56
#define GET_PTR(_heap, index)
Definition mem_alloc.c:53
#define ALLOC_CHUNK_HEADER_SIZE
Definition mem_alloc.c:27
struct header_s header_t
#define PAYLOAD_ALIGNEMENT
Definition mem_alloc.c:23
mem_ctx_t mem_init(void *heap_start, size_t heap_size)
initializes the heap for this allocator
Definition mem_alloc.c:283
#define NB_LINEAR_SEGMENTS
Definition mem_alloc.c:35
#define NB_SUB_SEGMENTS
Definition mem_alloc.c:50
void * mem_alloc(mem_ctx_t ctx, size_t nb_bytes)
allocates a buffer of the given size, if possible
Definition mem_alloc.c:320
#define NB_MAX_SEGMENTS
Definition mem_alloc.c:48
#define HEAP_HEADER_SIZE
Definition mem_alloc.c:31
#define FREE_CHUNK_HEADER_SIZE
Definition mem_alloc.c:29
void mem_free(mem_ctx_t ctx, void *ptr)
frees the given buffer
Definition mem_alloc.c:406
void mem_stat(mem_ctx_t *ctx, mem_stat_t *stat)
function used to get statistics (nb chunks, nb allocated chunks, total size) about the heap
Definition mem_alloc.c:477
void mem_parse(mem_ctx_t ctx, mem_parse_callback_t callback, void *data)
parse the heap
Definition mem_alloc.c:439
#define GET_PREV(_heap, _ptr)
Definition mem_alloc.c:55
#define GET_IDX(_heap, _ptr)
Definition mem_alloc.c:54
Dynamic memory allocation API.
void * mem_ctx_t
type shared externally
Definition mem_alloc.h:27
bool(* mem_parse_callback_t)(void *data, uint8_t *addr, bool allocated, size_t size)
function called for each chunk found in the heap.
Definition mem_alloc.h:40
#define MAX(x, y)
Definition nbgl_types.h:103
uint16_t fprev
previous free chunk in the same segment
Definition mem_alloc.c:67
uint16_t size
Definition mem_alloc.c:62
uint16_t phys_prev
physical previous chunk
Definition mem_alloc.c:65
uint16_t fnext
next free chunk in the same segment
Definition mem_alloc.c:68
uint16_t allocated
Definition mem_alloc.c:63
void * end
end of heap buffer, for consistency check
Definition mem_alloc.c:72
size_t nb_segs
actual number of used segments
Definition mem_alloc.c:73
uint16_t free_segments[NB_MAX_SEGMENTS *NB_SUB_SEGMENTS]
Definition mem_alloc.c:74
this structure, filled by mem_stat
Definition mem_alloc.h:46
size_t allocated_size
nb bytes allocated in the heap (including headers)
Definition mem_alloc.h:50
size_t total_size
total size of the heap (including allocator internal data)
Definition mem_alloc.h:47
uint32_t nb_allocated
number of allocated chunks
Definition mem_alloc.h:52
size_t free_size
Definition mem_alloc.h:48
uint32_t nb_chunks
total number of chunks
Definition mem_alloc.h:51
unsigned short uint16_t
Definition usbd_conf.h:54
unsigned char uint8_t
Definition usbd_conf.h:53