Browse Source

lib/os/heap: straighten up our type usage

The size_t usage, especially in struct z_heap_bucket made the heap
header almost 2x bigger than it needs to be on 64-bit systems.
This prompted me to clean up our type usage to make the code more
efficient and easier to understand. From now on:

- chunkid_t is for absolute chunk position measured in chunk units
- chunksz_t is for chunk sizes measured in chunk units
- size_t is for buffer sizes measured in bytes

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
pull/33489/head
Nicolas Pitre 4 years ago committed by Anas Nashif
parent
commit
b1eefc0c26
  1. 8
      lib/os/heap-validate.c
  2. 44
      lib/os/heap.c
  3. 31
      lib/os/heap.h

8
lib/os/heap-validate.c

@@ -17,7 +17,7 @@
* running one and corrupting it. YMMV.
*/
static size_t max_chunkid(struct z_heap *h)
static chunkid_t max_chunkid(struct z_heap *h)
{
return h->end_chunk - min_chunk_size(h);
}
@@ -329,7 +329,7 @@ void heap_print_info(struct z_heap *h, bool dump_chunks)
" -----------------------------------------------------------\n");
for (i = 0; i < nb_buckets; i++) {
chunkid_t first = h->buckets[i].next;
size_t largest = 0;
chunksz_t largest = 0;
int count = 0;
if (first) {
@@ -341,7 +341,7 @@ void heap_print_info(struct z_heap *h, bool dump_chunks)
} while (curr != first);
}
if (count) {
printk("%9d %12d %12d %12zd %12zd\n",
printk("%9d %12d %12d %12d %12zd\n",
i, (1 << i) - 1 + min_chunk_size(h), count,
largest, chunksz_to_bytes(h, largest));
}
@@ -360,7 +360,7 @@ void heap_print_info(struct z_heap *h, bool dump_chunks)
free_bytes += chunksz_to_bytes(h, chunk_size(h, c));
}
if (dump_chunks) {
printk("chunk %4zd: [%c] size=%-4zd left=%-4zd right=%zd\n",
printk("chunk %4d: [%c] size=%-4d left=%-4d right=%d\n",
c,
chunk_used(h, c) ? '*'
: solo_free_header(h, c) ? '.'

44
lib/os/heap.c

@@ -13,7 +13,7 @@ static void *chunk_mem(struct z_heap *h, chunkid_t c)
chunk_unit_t *buf = chunk_buf(h);
uint8_t *ret = ((uint8_t *)&buf[c]) + chunk_header_bytes(h);
CHECK(!(((size_t)ret) & (big_heap(h) ? 7 : 3)));
CHECK(!(((uintptr_t)ret) & (big_heap(h) ? 7 : 3)));
return ret;
}
@@ -90,9 +90,9 @@ static void split_chunks(struct z_heap *h, chunkid_t lc, chunkid_t rc)
CHECK(rc > lc);
CHECK(rc - lc < chunk_size(h, lc));
size_t sz0 = chunk_size(h, lc);
size_t lsz = rc - lc;
size_t rsz = sz0 - lsz;
chunksz_t sz0 = chunk_size(h, lc);
chunksz_t lsz = rc - lc;
chunksz_t rsz = sz0 - lsz;
set_chunk_size(h, lc, lsz);
set_chunk_size(h, rc, rsz);
@@ -103,7 +103,7 @@ static void split_chunks(struct z_heap *h, chunkid_t lc, chunkid_t rc)
/* Does not modify free list */
static void merge_chunks(struct z_heap *h, chunkid_t lc, chunkid_t rc)
{
size_t newsz = chunk_size(h, lc) + chunk_size(h, rc);
chunksz_t newsz = chunk_size(h, lc) + chunk_size(h, rc);
set_chunk_size(h, lc, newsz);
set_left_chunk_size(h, right_chunk(h, rc), newsz);
@@ -167,7 +167,7 @@ void sys_heap_free(struct sys_heap *heap, void *mem)
free_chunk(h, c);
}
static chunkid_t alloc_chunk(struct z_heap *h, size_t sz)
static chunkid_t alloc_chunk(struct z_heap *h, chunksz_t sz)
{
int bi = bucket_idx(h, sz);
struct z_heap_bucket *b = &h->buckets[bi];
@@ -205,7 +205,7 @@ static chunkid_t alloc_chunk(struct z_heap *h, size_t sz)
/* Otherwise pick the smallest non-empty bucket guaranteed to
* fit and use that unconditionally.
*/
size_t bmask = h->avail_buckets & ~((1 << (bi + 1)) - 1);
uint32_t bmask = h->avail_buckets & ~((1 << (bi + 1)) - 1);
if (bmask != 0U) {
int minbucket = __builtin_ctz(bmask);
@@ -227,7 +227,7 @@ void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
return NULL;
}
size_t chunk_sz = bytes_to_chunksz(h, bytes);
chunksz_t chunk_sz = bytes_to_chunksz(h, bytes);
chunkid_t c = alloc_chunk(h, chunk_sz);
if (c == 0U) {
return NULL;
@@ -246,7 +246,7 @@ void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
void *sys_heap_aligned_alloc(struct sys_heap *heap, size_t align, size_t bytes)
{
struct z_heap *h = heap->heap;
size_t padded_sz, gap, rewind;
size_t gap, rewind;
/*
* Split align and rewind values (if any).
@@ -277,7 +277,7 @@ void *sys_heap_aligned_alloc(struct sys_heap *heap, size_t align, size_t bytes)
* We over-allocate to account for alignment and then free
* the extra allocations afterwards.
*/
padded_sz = bytes_to_chunksz(h, bytes + align - gap);
chunksz_t padded_sz = bytes_to_chunksz(h, bytes + align - gap);
chunkid_t c0 = alloc_chunk(h, padded_sz);
if (c0 == 0) {
@@ -333,7 +333,7 @@ void *sys_heap_aligned_realloc(struct sys_heap *heap, void *ptr,
chunkid_t c = mem_to_chunkid(h, ptr);
chunkid_t rc = right_chunk(h, c);
size_t align_gap = (uint8_t *)ptr - (uint8_t *)chunk_mem(h, c);
size_t chunks_need = bytes_to_chunksz(h, bytes + align_gap);
chunksz_t chunks_need = bytes_to_chunksz(h, bytes + align_gap);
if (align && ((uintptr_t)ptr & (align - 1))) {
/* ptr is not sufficiently aligned */
@@ -387,22 +387,21 @@ void sys_heap_init(struct sys_heap *heap, void *mem, size_t bytes)
/* Round the start up, the end down */
uintptr_t addr = ROUND_UP(mem, CHUNK_UNIT);
uintptr_t end = ROUND_DOWN((uint8_t *)mem + bytes, CHUNK_UNIT);
size_t buf_sz = (end - addr) / CHUNK_UNIT;
chunksz_t heap_sz = (end - addr) / CHUNK_UNIT;
CHECK(end > addr);
__ASSERT(buf_sz > chunksz(sizeof(struct z_heap)), "heap size is too small");
__ASSERT(heap_sz > chunksz(sizeof(struct z_heap)), "heap size is too small");
struct z_heap *h = (struct z_heap *)addr;
heap->heap = h;
h->chunk0_hdr_area = 0;
h->end_chunk = buf_sz;
h->end_chunk = heap_sz;
h->avail_buckets = 0;
int nb_buckets = bucket_idx(h, buf_sz) + 1;
size_t chunk0_size = chunksz(sizeof(struct z_heap) +
int nb_buckets = bucket_idx(h, heap_sz) + 1;
chunksz_t chunk0_size = chunksz(sizeof(struct z_heap) +
nb_buckets * sizeof(struct z_heap_bucket));
__ASSERT(chunk0_size + min_chunk_size(h) < buf_sz, "heap size is too small");
__ASSERT(chunk0_size + min_chunk_size(h) < heap_sz, "heap size is too small");
for (int i = 0; i < nb_buckets; i++) {
h->buckets[i].next = 0;
@@ -410,16 +409,17 @@ void sys_heap_init(struct sys_heap *heap, void *mem, size_t bytes)
/* chunk containing our struct z_heap */
set_chunk_size(h, 0, chunk0_size);
set_left_chunk_size(h, 0, 0);
set_chunk_used(h, 0, true);
/* chunk containing the free heap */
set_chunk_size(h, chunk0_size, buf_sz - chunk0_size);
set_chunk_size(h, chunk0_size, heap_sz - chunk0_size);
set_left_chunk_size(h, chunk0_size, chunk0_size);
/* the end marker chunk */
set_chunk_size(h, buf_sz, 0);
set_left_chunk_size(h, buf_sz, buf_sz - chunk0_size);
set_chunk_used(h, buf_sz, true);
set_chunk_size(h, heap_sz, 0);
set_left_chunk_size(h, heap_sz, heap_sz - chunk0_size);
set_chunk_used(h, heap_sz, true);
free_list_add(h, chunk0_size);
}

31
lib/os/heap.h

@@ -50,7 +50,6 @@
* the following chunk at the top. This ordering allows for quick buffer
* overflow detection by testing left_chunk(c + chunk_size(c)) == c.
*/
typedef size_t chunkid_t;
enum chunk_fields { LEFT_SIZE, SIZE_AND_USED, FREE_PREV, FREE_NEXT };
@@ -58,18 +57,22 @@ enum chunk_fields { LEFT_SIZE, SIZE_AND_USED, FREE_PREV, FREE_NEXT };
typedef struct { char bytes[CHUNK_UNIT]; } chunk_unit_t;
/* big_heap needs uint32_t, small_heap needs uint16_t */
typedef uint32_t chunkid_t;
typedef uint32_t chunksz_t;
struct z_heap_bucket {
chunkid_t next;
};
struct z_heap {
uint64_t chunk0_hdr_area; /* matches the largest header */
chunkid_t chunk0_hdr[2];
chunkid_t end_chunk;
uint32_t avail_buckets;
struct z_heap_bucket buckets[0];
};
static inline bool big_heap_chunks(size_t chunks)
static inline bool big_heap_chunks(chunksz_t chunks)
{
return sizeof(void *) > 4U || chunks > 0x7fffU;
}
@@ -90,8 +93,8 @@ static inline chunk_unit_t *chunk_buf(struct z_heap *h)
return (chunk_unit_t *)h;
}
static inline size_t chunk_field(struct z_heap *h, chunkid_t c,
enum chunk_fields f)
static inline chunkid_t chunk_field(struct z_heap *h, chunkid_t c,
enum chunk_fields f)
{
chunk_unit_t *buf = chunk_buf(h);
void *cmem = &buf[c];
@@ -125,7 +128,7 @@ static inline bool chunk_used(struct z_heap *h, chunkid_t c)
return chunk_field(h, c, SIZE_AND_USED) & 1U;
}
static inline size_t chunk_size(struct z_heap *h, chunkid_t c)
static inline chunksz_t chunk_size(struct z_heap *h, chunkid_t c)
{
return chunk_field(h, c, SIZE_AND_USED) >> 1;
}
@@ -155,7 +158,7 @@ static inline void set_chunk_used(struct z_heap *h, chunkid_t c, bool used)
* when its size is modified, and potential set_chunk_used() is always
* invoked after set_chunk_size().
*/
static inline void set_chunk_size(struct z_heap *h, chunkid_t c, size_t size)
static inline void set_chunk_size(struct z_heap *h, chunkid_t c, chunksz_t size)
{
chunk_set(h, c, SIZE_AND_USED, size << 1);
}
@@ -193,7 +196,7 @@ static inline chunkid_t right_chunk(struct z_heap *h, chunkid_t c)
}
static inline void set_left_chunk_size(struct z_heap *h, chunkid_t c,
size_t size)
chunksz_t size)
{
chunk_set(h, c, LEFT_SIZE, size);
}
@@ -213,29 +216,29 @@ static inline size_t heap_footer_bytes(size_t size)
return big_heap_bytes(size) ? 8 : 4;
}
static inline size_t chunksz(size_t bytes)
static inline chunksz_t chunksz(size_t bytes)
{
return (bytes + CHUNK_UNIT - 1U) / CHUNK_UNIT;
}
static inline size_t bytes_to_chunksz(struct z_heap *h, size_t bytes)
static inline chunksz_t bytes_to_chunksz(struct z_heap *h, size_t bytes)
{
return chunksz(chunk_header_bytes(h) + bytes);
}
static inline int min_chunk_size(struct z_heap *h)
static inline chunksz_t min_chunk_size(struct z_heap *h)
{
return bytes_to_chunksz(h, 1);
}
static inline size_t chunksz_to_bytes(struct z_heap *h, size_t chunksz)
static inline size_t chunksz_to_bytes(struct z_heap *h, chunksz_t chunksz)
{
return chunksz * CHUNK_UNIT - chunk_header_bytes(h);
}
static inline int bucket_idx(struct z_heap *h, size_t sz)
static inline int bucket_idx(struct z_heap *h, chunksz_t sz)
{
size_t usable_sz = sz - min_chunk_size(h) + 1;
unsigned int usable_sz = sz - min_chunk_size(h) + 1;
return 31 - __builtin_clz(usable_sz);
}

Loading…
Cancel
Save