kernel: Add k_heap synchronized memory allocator

This adds a k_heap data structure, a synchronized wrapper around a
sys_heap memory allocator.  As of this patch, it is an alternative
implementation to k_mem_pool() with somewhat better efficiency and
performance and more conventional (and convenient) behavior.

Note that this commit involves some header motion to break
dependencies.  The declaration for struct k_spinlock moves to
kernel_structs.h, and a bunch of includes were trimmed.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Authored by Andy Ross, committed by Andrew Boie
branch: pull/24351/head
commit 0dd83b8c2e
 include/kernel.h             | 63
 include/kernel_structs.h     | 43
 include/linker/common-ram.ld |  7
 include/spinlock.h           | 32
 include/sys/sys_heap.h       |  4
 kernel/CMakeLists.txt        |  1
 kernel/atomic_c.c            |  3
 kernel/kheap.c               | 60
 8 files changed

include/kernel.h

@@ -4618,6 +4618,69 @@ static inline u32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
  * @{
  */
 
+/**
+ * @brief Initialize a k_heap
+ *
+ * This constructs a synchronized k_heap object over a memory region
+ * specified by the user. Note that while any alignment and size can
+ * be passed as valid parameters, internal alignment restrictions
+ * inside the inner sys_heap mean that not all bytes may be usable as
+ * allocated memory.
+ *
+ * @param h Heap struct to initialize
+ * @param mem Pointer to memory.
+ * @param bytes Size of memory region, in bytes
+ */
+void k_heap_init(struct k_heap *h, void *mem, size_t bytes);
+
+/**
+ * @brief Allocate memory from a k_heap
+ *
+ * Allocates and returns a memory buffer from the memory region owned
+ * by the heap.  If no memory is available immediately, the call will
+ * block for the specified timeout (constructed via the standard
+ * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
+ * freed.  If the allocation cannot be performed by the expiration of
+ * the timeout, NULL will be returned.
+ *
+ * @param h Heap from which to allocate
+ * @param bytes Desired size of block to allocate
+ * @param timeout How long to wait, or K_NO_WAIT
+ * @return A pointer to valid heap memory, or NULL
+ */
+void *k_heap_alloc(struct k_heap *h, size_t bytes, k_timeout_t timeout);
+
+/**
+ * @brief Free memory allocated by k_heap_alloc()
+ *
+ * Returns the specified memory block, which must have been returned
+ * from k_heap_alloc(), to the heap for use by other callers.  Passing
+ * a NULL block is legal, and has no effect.
+ *
+ * @param h Heap to which to return the memory
+ * @param mem A valid memory block, or NULL
+ */
+void k_heap_free(struct k_heap *h, void *mem);
+
+/**
+ * @brief Define a static k_heap
+ *
+ * This macro defines and initializes a static memory region and
+ * k_heap of the requested size.  After kernel start, &name can be
+ * used as if k_heap_init() had been called.
+ *
+ * @param name Symbol name for the struct k_heap object
+ * @param bytes Size of memory region, in bytes
+ */
+#define K_HEAP_DEFINE(name, bytes) \
+	char __aligned(sizeof(void *)) kheap_##name[bytes]; \
+	Z_STRUCT_SECTION_ITERABLE(k_heap, name) = { \
+		.heap = { \
+			.init_mem = kheap_##name, \
+			.init_bytes = (bytes), \
+		}, \
+	}
+
 /**
  * @brief Statically define and initialize a memory pool.
  *

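As a point of reference, here is a minimal usage sketch of the API
above (not part of the commit; the heap name, sizes, and timeout are
hypothetical):

    /* Define a 1 KiB heap at build time; initialized before main() */
    K_HEAP_DEFINE(my_heap, 1024);

    void worker(void)
    {
        /* Block up to 100 ms waiting for 64 bytes to become free */
        void *block = k_heap_alloc(&my_heap, 64, K_MSEC(100));

        if (block == NULL) {
            return; /* timed out, heap still exhausted */
        }

        /* ... use block ... */
        k_heap_free(&my_heap, block);
    }
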
include/kernel_structs.h

@@ -21,10 +21,12 @@
 #define ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_
 
 #if !defined(_ASMLANGUAGE)
+#include <sys/atomic.h>
 #include <zephyr/types.h>
 #include <sched_priq.h>
 #include <sys/dlist.h>
 #include <sys/util.h>
+#include <sys/sys_heap.h>
 #endif
#define K_NUM_PRIORITIES \
@@ -240,6 +242,47 @@ struct _timeout {
 	_timeout_func_t fn;
 };
 
+/* kernel spinlock type */
+struct k_spinlock {
+#ifdef CONFIG_SMP
+	atomic_t locked;
+#endif
+
+#ifdef CONFIG_SPIN_VALIDATE
+	/* Stores the thread that holds the lock with the locking CPU
+	 * ID in the bottom two bits.
+	 */
+	uintptr_t thread_cpu;
+#endif
+
+#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && \
+	!defined(CONFIG_SPIN_VALIDATE)
+	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
+	 * the k_spinlock struct will have no members.  The result
+	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
+	 *
+	 * This size difference causes problems when the k_spinlock
+	 * is embedded into another struct like k_msgq, because C and
+	 * C++ will have different ideas on the offsets of the members
+	 * that come after the k_spinlock member.
+	 *
+	 * To prevent this we add a 1 byte dummy member to k_spinlock
+	 * when the user selects C++ support and k_spinlock would
+	 * otherwise be empty.
+	 */
+	char dummy;
+#endif
+};
+
+/* kernel synchronized heap struct */
+struct k_heap {
+	struct sys_heap heap;
+	_wait_q_t wait_q;
+	struct k_spinlock lock;
+};
+
 #endif /* _ASMLANGUAGE */
 
 #endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_ */
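A quick illustration of the empty-struct pitfall described in the
comment above; this is a hypothetical reproduction, not code from the
commit (GNU C permits empty structs and gives them sizeof 0, while C++
gives them sizeof 1):

    struct empty {
    };                        /* sizeof == 0 in GNU C, == 1 in C++ */

    struct msgq_like {
        struct empty lock;    /* offsetof(struct msgq_like, count) */
        unsigned int count;   /* differs between C and C++ here    */
    };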

include/linker/common-ram.ld

@@ -76,6 +76,13 @@
 		_k_mem_pool_list_end = .;
 	} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
 
+	SECTION_DATA_PROLOGUE(_k_heap_area,,SUBALIGN(4))
+	{
+		_k_heap_list_start = .;
+		KEEP(*("._k_heap.static.*"))
+		_k_heap_list_end = .;
+	} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
+
 	SECTION_DATA_PROLOGUE(_k_sem_area,,SUBALIGN(4))
 	{
 		_k_sem_list_start = .;

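K_HEAP_DEFINE() drops each static heap into a "._k_heap.static.*"
input section via Z_STRUCT_SECTION_ITERABLE, and this linker fragment
collects them between the _k_heap_list_start/_k_heap_list_end symbols.
Glossing over the real macro plumbing, the boot-time walk in kheap.c
behaves roughly like this simplified sketch (not the literal
expansion):

    extern struct k_heap _k_heap_list_start[];
    extern struct k_heap _k_heap_list_end[];

    /* Z_STRUCT_SECTION_FOREACH(k_heap, h) { ... } is approximately: */
    for (struct k_heap *h = _k_heap_list_start;
         h < _k_heap_list_end; h++) {
        /* initialize *h */
    }
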
include/spinlock.h

@@ -7,6 +7,7 @@
 #define ZEPHYR_INCLUDE_SPINLOCK_H_
 
 #include <sys/atomic.h>
+#include <kernel_structs.h>
 
 /* There's a spinlock validation framework available when asserts are
  * enabled.  It adds a relatively hefty overhead (about 3k or so) to
@@ -28,37 +29,6 @@ struct k_spinlock_key {
 typedef struct k_spinlock_key k_spinlock_key_t;
 
-struct k_spinlock {
-#ifdef CONFIG_SMP
-	atomic_t locked;
-#endif
-
-#ifdef CONFIG_SPIN_VALIDATE
-	/* Stores the thread that holds the lock with the locking CPU
-	 * ID in the bottom two bits.
-	 */
-	uintptr_t thread_cpu;
-#endif
-
-#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && \
-	!defined(CONFIG_SPIN_VALIDATE)
-	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
-	 * the k_spinlock struct will have no members.  The result
-	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
-	 *
-	 * This size difference causes problems when the k_spinlock
-	 * is embedded into another struct like k_msgq, because C and
-	 * C++ will have different ideas on the offsets of the members
-	 * that come after the k_spinlock member.
-	 *
-	 * To prevent this we add a 1 byte dummy member to k_spinlock
-	 * when the user selects C++ support and k_spinlock would
-	 * otherwise be empty.
-	 */
-	char dummy;
-#endif
-};
-
 static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 {
 	ARG_UNUSED(l);

include/sys/sys_heap.h

@@ -6,7 +6,9 @@
 #ifndef ZEPHYR_INCLUDE_SYS_SYS_HEAP_H_
 #define ZEPHYR_INCLUDE_SYS_SYS_HEAP_H_
 
-#include <kernel.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <zephyr/types.h>
 
 /* Simple, fast heap implementation.
  *

kernel/CMakeLists.txt

@@ -8,6 +8,7 @@ add_library(kernel
   fatal.c
   idle.c
   init.c
+  kheap.c
   mailbox.c
   mem_slab.c
   mempool.c

kernel/atomic_c.c

@@ -18,10 +18,11 @@
  * (originally from x86's atomic.c)
  */
 
-#include <sys/atomic.h>
 #include <toolchain.h>
 #include <arch/cpu.h>
 #include <spinlock.h>
+#include <sys/atomic.h>
+#include <kernel_structs.h>
 
 /* Single global spinlock for atomic operations.  This is fallback
  * code, not performance sensitive.  At least by not using irq_lock()

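For context on why atomic_c.c needs the spinlock headers at all: when
the architecture provides no native atomics, these fallback
implementations serialize on one global lock.  Roughly (a simplified
sketch, not the file's exact code):

    static struct k_spinlock lock;

    atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
    {
        k_spinlock_key_t key = k_spin_lock(&lock);
        atomic_val_t ret = *target;

        *target += value;
        k_spin_unlock(&lock, key);
        return ret;
    }
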
kernel/kheap.c

@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2020 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <kernel.h>
+#include <ksched.h>
+#include <wait_q.h>
+#include <init.h>
+
+void k_heap_init(struct k_heap *h, void *mem, size_t bytes)
+{
+	z_waitq_init(&h->wait_q);
+	sys_heap_init(&h->heap, mem, bytes);
+}
+
+/* Boot-time hook that initializes every K_HEAP_DEFINE'd heap found
+ * in the iterable section laid out by the linker.
+ */
+static int statics_init(struct device *unused)
+{
+	ARG_UNUSED(unused);
+	Z_STRUCT_SECTION_FOREACH(k_heap, h) {
+		k_heap_init(h, h->heap.init_mem, h->heap.init_bytes);
+	}
+	return 0;
+}
+
+SYS_INIT(statics_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
+
+void *k_heap_alloc(struct k_heap *h, size_t bytes, k_timeout_t timeout)
+{
+	s64_t now, end = z_timeout_end_calc(timeout);
+	void *ret = NULL;
+	k_spinlock_key_t key = k_spin_lock(&h->lock);
+
+	__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");
+
+	/* Retry the allocation on every wakeup, recomputing the
+	 * remaining timeout, until it succeeds or the deadline passes.
+	 */
+	while (ret == NULL) {
+		ret = sys_heap_alloc(&h->heap, bytes);
+		now = z_tick_get();
+
+		if ((ret != NULL) || ((end - now) <= 0)) {
+			break;
+		}
+
+		/* z_pend_curr() atomically releases the lock while the
+		 * thread sleeps, so it must be re-taken on wakeup.
+		 */
+		(void) z_pend_curr(&h->lock, key, &h->wait_q,
+				   K_TICKS(end - now));
+		key = k_spin_lock(&h->lock);
+	}
+
+	k_spin_unlock(&h->lock, key);
+	return ret;
+}
+
+void k_heap_free(struct k_heap *h, void *mem)
+{
+	k_spinlock_key_t key = k_spin_lock(&h->lock);
+
+	sys_heap_free(&h->heap, mem);
+	k_spin_unlock(&h->lock, key);
+}