kernel: sys_heap: Fix chunk size request validation

Updates the heap code to ensure that when converting the requested
number of bytes to chunks, we do not return a value that exceeds
the number of chunks in the heap.

Fixes #90306

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
Branch: pull/91043/head
Author: Peter Mitsis, committed by Benjamin Cabé
Commit: 811302e6d2

Changed files:
  lib/heap/heap.c | 16
  lib/heap/heap.h | 26

lib/heap/heap.c

@@ -265,12 +265,13 @@ void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
 	struct z_heap *h = heap->heap;
 	void *mem;
 
-	if ((bytes == 0U) || size_too_big(h, bytes)) {
+	if (bytes == 0U) {
 		return NULL;
 	}
 
-	chunksz_t chunk_sz = bytes_to_chunksz(h, bytes);
+	chunksz_t chunk_sz = bytes_to_chunksz(h, bytes, 0);
 	chunkid_t c = alloc_chunk(h, chunk_sz);
+
 	if (c == 0U) {
 		return NULL;
 	}
@@ -330,7 +331,7 @@ void *sys_heap_aligned_alloc(struct sys_heap *heap, size_t align, size_t bytes)
 	}
 	__ASSERT((align & (align - 1)) == 0, "align must be a power of 2");
 
-	if ((bytes == 0) || size_too_big(h, bytes)) {
+	if (bytes == 0) {
 		return NULL;
 	}
@@ -339,7 +340,7 @@ void *sys_heap_aligned_alloc(struct sys_heap *heap, size_t align, size_t bytes)
 	 * We over-allocate to account for alignment and then free
 	 * the extra allocations afterwards.
 	 */
-	chunksz_t padded_sz = bytes_to_chunksz(h, bytes + align - gap);
+	chunksz_t padded_sz = bytes_to_chunksz(h, bytes, align - gap);
 	chunkid_t c0 = alloc_chunk(h, padded_sz);
 
 	if (c0 == 0) {
@@ -387,13 +388,10 @@ static bool inplace_realloc(struct sys_heap *heap, void *ptr, size_t bytes)
 {
 	struct z_heap *h = heap->heap;
 
-	if (size_too_big(h, bytes)) {
-		return false;
-	}
-
 	chunkid_t c = mem_to_chunkid(h, ptr);
 	size_t align_gap = (uint8_t *)ptr - (uint8_t *)chunk_mem(h, c);
-	chunksz_t chunks_need = bytes_to_chunksz(h, bytes + align_gap);
+	chunksz_t chunks_need = bytes_to_chunksz(h, bytes, align_gap);
+
 	if (chunk_size(h, c) == chunks_need) {
 		/* We're good already */
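
The call-site changes above matter as much as the clamp itself: with the old single-argument signature, each caller formed "bytes + extra" before the conversion, and that unsigned addition silently wraps for huge requests. A tiny standalone illustration of the wraparound (not Zephyr code, values arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	size_t bytes = SIZE_MAX - 2;  /* absurd caller request */
	size_t align_gap = 8;

	/* Unsigned overflow is defined wraparound in C, so the old
	 * "bytes + align_gap" quietly becomes a tiny in-range value.
	 */
	printf("old path saw: %zu bytes\n", bytes + align_gap);  /* prints 5 */
	return 0;
}

Passing bytes and the alignment padding as separate arguments lets bytes_to_chunksz() combine them overflow-free.
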

lib/heap/heap.h

@@ -232,14 +232,25 @@ static inline chunksz_t chunksz(size_t bytes)
 	return (bytes + CHUNK_UNIT - 1U) / CHUNK_UNIT;
 }
 
-static inline chunksz_t bytes_to_chunksz(struct z_heap *h, size_t bytes)
+/**
+ * Convert the number of requested bytes to chunks and clamp it to facilitate
+ * error handling. As some of the heap is used for metadata, there will never
+ * be enough space for 'end_chunk' chunks. Also note that since 'size_t' may
+ * be 64-bits wide, clamping guards against overflow when converting to the
+ * 32-bit wide 'chunksz_t'.
+ */
+static ALWAYS_INLINE chunksz_t bytes_to_chunksz(struct z_heap *h, size_t bytes, size_t extra)
 {
-	return chunksz(chunk_header_bytes(h) + bytes);
+	size_t chunks = (bytes / CHUNK_UNIT) + (extra / CHUNK_UNIT);
+	size_t oddments = ((bytes % CHUNK_UNIT) + (extra % CHUNK_UNIT) +
+			   chunk_header_bytes(h) + CHUNK_UNIT - 1U) / CHUNK_UNIT;
+
+	return (chunksz_t)MIN(chunks + oddments, h->end_chunk);
 }
 
 static inline chunksz_t min_chunk_size(struct z_heap *h)
 {
-	return bytes_to_chunksz(h, 1);
+	return chunksz(chunk_header_bytes(h) + 1);
 }
 
 static inline size_t chunksz_to_bytes(struct z_heap *h, chunksz_t chunksz_in)
@@ -253,15 +264,6 @@ static inline int bucket_idx(struct z_heap *h, chunksz_t sz)
 	return 31 - __builtin_clz(usable_sz);
 }
 
-static inline bool size_too_big(struct z_heap *h, size_t bytes)
-{
-	/*
-	 * Quick check to bail out early if size is too big.
-	 * Also guards against potential arithmetic overflows elsewhere.
-	 */
-	return (bytes / CHUNK_UNIT) >= h->end_chunk;
-}
-
 static inline void get_alloc_info(struct z_heap *h, size_t *alloc_bytes,
 				  size_t *free_bytes)
 {
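
Taken together, the patch replaces the scattered size_too_big() pre-checks with a single clamp at the conversion. A minimal sketch of the behavior this guarantees at the public API, using the sys_heap entry points that appear in this diff (buffer size and alignment values are arbitrary):

#include <stdint.h>
#include <zephyr/sys/sys_heap.h>
#include <zephyr/sys/__assert.h>

static struct sys_heap heap;
static uint8_t heap_mem[2048];

void check_oversized_requests(void)
{
	sys_heap_init(&heap, heap_mem, sizeof(heap_mem));

	/* bytes_to_chunksz() clamps the request to end_chunk; no free
	 * block can satisfy that, so alloc_chunk() fails and NULL returns.
	 */
	void *p = sys_heap_alloc(&heap, SIZE_MAX);
	__ASSERT(p == NULL, "oversized request must fail");

	/* The aligned path now passes bytes and the alignment padding
	 * separately, so "bytes + align - gap" can no longer wrap.
	 */
	p = sys_heap_aligned_alloc(&heap, 64, SIZE_MAX - 32);
	__ASSERT(p == NULL, "oversized aligned request must fail");
}
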
