
mem_slab: move global lock to per slab lock

This avoids contention between unrelated slabs and allows for
userspace accessible slabs when located in memory partitions.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
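
As context for the contention point in the commit message: with the former file-scope spinlock in kernel/mem_slab.c, an allocation from any slab briefly serialized against allocations from every other slab in the system. The sketch below (slab names, sizes, and functions are invented for illustration, not taken from this commit) shows two unrelated slabs; after this change each alloc/free path takes only its own slab->lock, so on SMP builds the two paths no longer spin on a shared lock.

/*
 * Illustrative only: two unrelated slabs used from different threads.
 * Before this commit, both k_mem_slab_alloc() calls took the single
 * static spinlock in kernel/mem_slab.c; afterwards each takes only the
 * lock embedded in its own struct k_mem_slab.
 */
#include <zephyr.h>

K_MEM_SLAB_DEFINE(net_buf_slab, 64, 16, 4);	/* hypothetical slab A */
K_MEM_SLAB_DEFINE(sensor_slab, 32, 8, 4);	/* hypothetical slab B */

void net_work(void)
{
	void *block;

	if (k_mem_slab_alloc(&net_buf_slab, &block, K_NO_WAIT) == 0) {
		/* ... fill and hand off the block ... */
		k_mem_slab_free(&net_buf_slab, &block);
	}
}

void sensor_work(void)
{
	void *block;

	if (k_mem_slab_alloc(&sensor_slab, &block, K_NO_WAIT) == 0) {
		/* ... take a reading into the block ... */
		k_mem_slab_free(&sensor_slab, &block);
	}
}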
Ref: pull/33321/head
Authored by Nicolas Pitre 4 years ago; committed by Anas Nashif
Commit 2bed37e534
Files changed:
  include/kernel.h   (2 changes)
  kernel/mem_slab.c  (15 changes)

include/kernel.h (2 changes)

@@ -4724,6 +4724,7 @@ __syscall size_t k_pipe_write_avail(struct k_pipe *pipe);
 struct k_mem_slab {
 	_wait_q_t wait_q;
+	struct k_spinlock lock;
 	uint32_t num_blocks;
 	size_t block_size;
 	char *buffer;
@@ -4740,6 +4741,7 @@ struct k_mem_slab {
 #define Z_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \
 			       slab_num_blocks) \
 	{ \
+	.lock = {}, \
 	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
 	.num_blocks = slab_num_blocks, \
 	.block_size = slab_block_size, \

kernel/mem_slab.c (15 changes)

@@ -15,8 +15,6 @@
 #include <init.h>
 #include <sys/check.h>
 
-static struct k_spinlock lock;
-
 #ifdef CONFIG_OBJECT_TRACING
 struct k_mem_slab *_trace_list_k_mem_slab;
 #endif /* CONFIG_OBJECT_TRACING */
@@ -88,6 +86,7 @@ int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
 	slab->block_size = block_size;
 	slab->buffer = buffer;
 	slab->num_used = 0U;
+	slab->lock = (struct k_spinlock) {};
 
 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
 	slab->max_used = 0U;
@@ -108,7 +107,7 @@ out:
 
 int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
 {
-	k_spinlock_key_t key = k_spin_lock(&lock);
+	k_spinlock_key_t key = k_spin_lock(&slab->lock);
 	int result;
 
 	if (slab->free_list != NULL) {
@@ -128,21 +127,21 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
 		result = -ENOMEM;
 	} else {
 		/* wait for a free block or timeout */
-		result = z_pend_curr(&lock, key, &slab->wait_q, timeout);
+		result = z_pend_curr(&slab->lock, key, &slab->wait_q, timeout);
 		if (result == 0) {
 			*mem = _current->base.swap_data;
 		}
 		return result;
 	}
 
-	k_spin_unlock(&lock, key);
+	k_spin_unlock(&slab->lock, key);
 
 	return result;
 }
 
 void k_mem_slab_free(struct k_mem_slab *slab, void **mem)
 {
-	k_spinlock_key_t key = k_spin_lock(&lock);
+	k_spinlock_key_t key = k_spin_lock(&slab->lock);
 
 	if (slab->free_list == NULL) {
 		struct k_thread *pending_thread = z_unpend_first_thread(&slab->wait_q);
@@ -150,12 +149,12 @@ void k_mem_slab_free(struct k_mem_slab *slab, void **mem)
 		if (pending_thread != NULL) {
 			z_thread_return_value_set_with_data(pending_thread, 0, *mem);
 			z_ready_thread(pending_thread);
-			z_reschedule(&lock, key);
+			z_reschedule(&slab->lock, key);
 			return;
 		}
 	}
 
 	**(char ***) mem = slab->free_list;
 	slab->free_list = *(char **) mem;
 	slab->num_used--;
-	k_spin_unlock(&lock, key);
+	k_spin_unlock(&slab->lock, key);
 }
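
On the second motivation, userspace accessible slabs located in memory partitions: now that the lock is a member of struct k_mem_slab, all of the state the slab operations touch can be placed in application memory instead of also depending on a file-scope lock in kernel data. A rough sketch of that placement, assuming CONFIG_USERSPACE; the partition and slab names, and the use of K_APP_BMEM here, are illustrative and not part of this commit:

/*
 * Rough sketch: both the slab control structure (which now contains its
 * own spinlock) and the backing buffer live inside an application memory
 * partition, so a user thread granted access to app_part can operate on
 * the slab without touching any kernel-private global.
 */
#include <zephyr.h>
#include <app_memory/app_memdomain.h>

#define APP_BLOCK_SIZE 32
#define APP_NUM_BLOCKS 4

K_APPMEM_PARTITION_DEFINE(app_part);

K_APP_BMEM(app_part) struct k_mem_slab app_slab;
K_APP_BMEM(app_part) char __aligned(4) app_slab_buf[APP_BLOCK_SIZE * APP_NUM_BLOCKS];

void app_slab_setup(void)
{
	/* Initializes the per-slab lock along with the rest of the state. */
	k_mem_slab_init(&app_slab, app_slab_buf, APP_BLOCK_SIZE, APP_NUM_BLOCKS);
}

Before this commit, the same call chain would also have referenced the global lock in kernel/mem_slab.c, which sits outside any application partition.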
