From 2bed37e534870cabb95766c6b18b1e2c78818ace Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Tue, 13 Apr 2021 11:10:22 -0400
Subject: [PATCH] mem_slab: move global lock to per slab lock

This avoids contention between unrelated slabs and allows for userspace
accessible slabs when located in memory partitions.

Signed-off-by: Nicolas Pitre
---
 include/kernel.h  |  2 ++
 kernel/mem_slab.c | 15 +++++++--------
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/include/kernel.h b/include/kernel.h
index b1769053709..b55a30595fc 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -4724,6 +4724,7 @@ __syscall size_t k_pipe_write_avail(struct k_pipe *pipe);
 
 struct k_mem_slab {
 	_wait_q_t wait_q;
+	struct k_spinlock lock;
 	uint32_t num_blocks;
 	size_t block_size;
 	char *buffer;
@@ -4740,6 +4741,7 @@ struct k_mem_slab {
 #define Z_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \
 			       slab_num_blocks) \
 	{ \
+	.lock = {}, \
 	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
 	.num_blocks = slab_num_blocks, \
 	.block_size = slab_block_size, \
diff --git a/kernel/mem_slab.c b/kernel/mem_slab.c
index cc2d3c68ca9..30fa1410ad1 100644
--- a/kernel/mem_slab.c
+++ b/kernel/mem_slab.c
@@ -15,8 +15,6 @@
 #include <init.h>
 #include <sys/check.h>
 
-static struct k_spinlock lock;
-
 #ifdef CONFIG_OBJECT_TRACING
 struct k_mem_slab *_trace_list_k_mem_slab;
 #endif /* CONFIG_OBJECT_TRACING */
@@ -88,6 +86,7 @@ int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
 	slab->block_size = block_size;
 	slab->buffer = buffer;
 	slab->num_used = 0U;
+	slab->lock = (struct k_spinlock) {};
 
 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
 	slab->max_used = 0U;
@@ -108,7 +107,7 @@ out:
 
 int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
 {
-	k_spinlock_key_t key = k_spin_lock(&lock);
+	k_spinlock_key_t key = k_spin_lock(&slab->lock);
 	int result;
 
 	if (slab->free_list != NULL) {
@@ -128,21 +127,21 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
 		result = -ENOMEM;
 	} else {
 		/* wait for a free block or timeout */
-		result = z_pend_curr(&lock, key, &slab->wait_q, timeout);
+		result = z_pend_curr(&slab->lock, key, &slab->wait_q, timeout);
 		if (result == 0) {
 			*mem = _current->base.swap_data;
 		}
 		return result;
 	}
 
-	k_spin_unlock(&lock, key);
+	k_spin_unlock(&slab->lock, key);
 
 	return result;
 }
 
 void k_mem_slab_free(struct k_mem_slab *slab, void **mem)
 {
-	k_spinlock_key_t key = k_spin_lock(&lock);
+	k_spinlock_key_t key = k_spin_lock(&slab->lock);
 
 	if (slab->free_list == NULL) {
 		struct k_thread *pending_thread = z_unpend_first_thread(&slab->wait_q);
@@ -150,12 +149,12 @@ void k_mem_slab_free(struct k_mem_slab *slab, void **mem)
 		if (pending_thread != NULL) {
 			z_thread_return_value_set_with_data(pending_thread, 0, *mem);
 			z_ready_thread(pending_thread);
-			z_reschedule(&lock, key);
+			z_reschedule(&slab->lock, key);
 			return;
 		}
 	}
 	**(char ***) mem = slab->free_list;
 	slab->free_list = *(char **) mem;
 	slab->num_used--;
-	k_spin_unlock(&lock, key);
+	k_spin_unlock(&slab->lock, key);
 }
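
Usage note (not part of the patch): a minimal sketch of the "contention between
unrelated slabs" point. The slab names and geometry below are illustrative; the
sketch only assumes the public K_MEM_SLAB_DEFINE / k_mem_slab_alloc /
k_mem_slab_free API that the diff above modifies. Before this change, both
slabs serialized on the single file-scope spinlock; after it, each call takes
only its own slab's lock.

#include <zephyr.h>

/* Two unrelated slabs; block sizes and counts are illustrative only. */
K_MEM_SLAB_DEFINE(net_slab, 64, 8, 4);    /* 8 blocks of 64 bytes  */
K_MEM_SLAB_DEFINE(audio_slab, 128, 4, 4); /* 4 blocks of 128 bytes */

void net_rx_handler(void)
{
	void *block;

	/* Takes only net_slab.lock: a thread busy inside audio_slab's
	 * allocator no longer delays this allocation, as it could when
	 * both slabs shared the global lock.
	 */
	if (k_mem_slab_alloc(&net_slab, &block, K_NO_WAIT) == 0) {
		/* ... fill the block and hand it off ... */
		k_mem_slab_free(&net_slab, &block);
	}
}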
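
A second sketch, for the "userspace accessible slabs" point: with the lock now
a member of struct k_mem_slab rather than kernel-private file-scope data, all
state the slab code touches can be placed in an application memory partition.
The partition and variable names below are hypothetical, and the memory-domain
setup that grants a user thread access to the partition is elided.

#include <zephyr.h>
#include <app_memory/app_memdomain.h>

/* Hypothetical partition holding everything the slab routines access. */
K_APPMEM_PARTITION_DEFINE(slab_part);

K_APP_DMEM(slab_part) struct k_mem_slab user_slab;
K_APP_DMEM(slab_part) char __aligned(4) user_slab_buf[16 * 32];

void slab_setup(void)
{
	/* 16 blocks of 32 bytes. The per-slab lock initialized here lives
	 * inside user_slab, i.e. inside slab_part, so a thread whose memory
	 * domain includes slab_part can reach it; the old file-scope lock
	 * stayed in kernel-private memory regardless of where the slab was.
	 */
	(void)k_mem_slab_init(&user_slab, user_slab_buf, 32, 16);
}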