
kernel: mem_domain: arch_mem_domain functions to return errors

This changes the arch_mem_domain_*() functions to return errors.
This gives the callers a chance to recover if needed.

Note that:
() For assertions where the function can bail out early without side
   effects, these are converted to CHECKIF(). (This usually means
   that updating of page tables or translation tables has not
   started yet.)
() Other assertions are retained to signal fatal errors during
   development.
() The additional CHECKIF() checks are structured to bail out early
   where possible. If errors are encountered inside a loop, the loop
   still runs to completion, so behavior with assertions disabled
   matches what it was before this change.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
Branch: pull/40572/head
Author: Daniel Leung, committed by Anas Nashif
Commit: 1cd7cccbb1
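
To make the conversion pattern concrete before the per-file diffs, here is a
minimal sketch in the same spirit; the helper example_partition_add() is
invented for illustration, while CHECKIF() from <sys/check.h>, the early
bail-out before touching page tables, and the negative-errno returns mirror
what the diffs below actually do.

    /* Hypothetical arch hook, for illustration only. Before this commit it
     * would have been "void" with
     *   __ASSERT(id < domain->num_partitions, "invalid partition id");
     */
    #include <kernel.h>
    #include <errno.h>
    #include <sys/check.h>      /* CHECKIF() */
    #include <logging/log.h>

    LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

    static int example_partition_add(struct k_mem_domain *domain, uint32_t id)
    {
        int ret = 0;

        /* Bail out before any page/translation tables are touched. */
        CHECKIF(id >= domain->num_partitions) {
            LOG_ERR("invalid partition id %u", id);
            ret = -EINVAL;
            goto out;
        }

        /* ... update page tables here; an error inside a loop sets ret but
         * the loop keeps going, matching the old behavior when assertions
         * are disabled ...
         */

    out:
        return ret;
    }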
Files changed (6):
  arch/arm64/core/cortex_r/arm_mpu.c  | 132
  arch/arm64/core/mmu.c               |  56
  arch/riscv/core/pmp/core_pmp.c      |  73
  arch/riscv/include/core_pmp.h       |   6
  arch/x86/core/x86_mmu.c             | 295
  include/sys/arch_interface.h        |  27

arch/arm64/core/cortex_r/arm_mpu.c (132 changed lines)

@@ -12,6 +12,7 @@
#include <arch/arm64/mm.h>
#include <linker/linker-defs.h>
#include <logging/log.h>
#include <sys/check.h>
LOG_MODULE_REGISTER(mpu, CONFIG_MPU_LOG_LEVEL);
@@ -247,8 +248,13 @@ static int dynamic_areas_init(uintptr_t start, size_t size)
static int dup_dynamic_regions(struct dynamic_region_info *dst, int len)
{
size_t i;
int ret = sys_dyn_regions_num;
__ASSERT(sys_dyn_regions_num < len, "system dynamic region nums too large.");
CHECKIF(!(sys_dyn_regions_num < len)) {
LOG_ERR("system dynamic region nums too large.");
ret = -EINVAL;
goto out;
}
for (i = 0; i < sys_dyn_regions_num; i++) {
dst[i] = sys_dyn_regions[i];
@@ -257,7 +263,8 @@ static int dup_dynamic_regions(struct dynamic_region_info *dst, int len)
dst[i].index = -1;
}
return sys_dyn_regions_num;
out:
return ret;
}
static void set_region(struct arm_mpu_region *region,
@@ -283,10 +290,10 @@ static int get_underlying_region_idx(struct dynamic_region_info *dyn_regions,
return -1;
}
static uint8_t insert_region(struct dynamic_region_info *dyn_regions,
uint8_t region_idx, uint8_t region_num,
uintptr_t start, size_t size,
struct arm_mpu_region_attr *attr)
static int insert_region(struct dynamic_region_info *dyn_regions,
uint8_t region_idx, uint8_t region_num,
uintptr_t start, size_t size,
struct arm_mpu_region_attr *attr)
{
/* base: inclusive, limit: exclusive */
@@ -297,15 +304,23 @@ static uint8_t insert_region(struct dynamic_region_info *dyn_regions,
uint64_t u_base;
uint64_t u_limit;
struct arm_mpu_region_attr *u_attr;
__ASSERT(region_idx < region_num,
"Out-of-bounds error for dynamic region map. region idx: %d, region num: %d",
region_idx, region_num);
int ret = 0;
CHECKIF(!(region_idx < region_num)) {
LOG_ERR("Out-of-bounds error for dynamic region map. "
"region idx: %d, region num: %d",
region_idx, region_num);
ret = -EINVAL;
goto out;
}
u_idx = get_underlying_region_idx(dyn_regions, region_idx, base, limit);
__ASSERT(u_idx >= 0, "Invalid underlying region index");
CHECKIF(!(u_idx >= 0)) {
LOG_ERR("Invalid underlying region index");
ret = -ENOENT;
goto out;
}
/* Get underlying region range and attr */
u_region = &(dyn_regions[u_idx].region_conf);
@@ -339,13 +354,18 @@ static uint8_t insert_region(struct dynamic_region_info *dyn_regions,
region_idx++;
}
return region_idx;
ret = region_idx;
out:
return ret;
}
static int flush_dynamic_regions_to_mpu(struct dynamic_region_info *dyn_regions,
uint8_t region_num)
{
int reg_avail_idx = static_regions_num;
int ret = 0;
/*
* Clean the dynamic regions
*/
@@ -371,16 +391,20 @@ static int flush_dynamic_regions_to_mpu(struct dynamic_region_info *dyn_regions,
if (region_idx < 0) {
region_idx = reg_avail_idx++;
}
__ASSERT(region_idx < get_num_regions(),
"Out-of-bounds error for mpu regions. region idx: %d, total mpu regions: %d",
region_idx, get_num_regions());
CHECKIF(!(region_idx < get_num_regions())) {
LOG_ERR("Out-of-bounds error for mpu regions. "
"region idx: %d, total mpu regions: %d",
region_idx, get_num_regions());
ret = -ENOENT;
}
region_init(region_idx, &(dyn_regions[i].region_conf));
}
return 0;
return ret;
}
static void configure_dynamic_mpu_regions(struct k_thread *thread)
static int configure_dynamic_mpu_regions(struct k_thread *thread)
{
/*
* Allocate double space for dyn_regions. Because when split
@@ -390,8 +414,15 @@ static void configure_dynamic_mpu_regions(struct k_thread *thread)
struct dynamic_region_info dyn_regions[_MAX_DYNAMIC_MPU_REGIONS_NUM * 2];
const uint8_t max_region_num = ARRAY_SIZE(dyn_regions);
uint8_t region_num;
int ret = 0, ret2;
region_num = dup_dynamic_regions(dyn_regions, max_region_num);
ret2 = dup_dynamic_regions(dyn_regions, max_region_num);
CHECKIF(ret2 < 0) {
ret = ret2;
goto out;
}
region_num = (uint8_t)ret2;
struct k_mem_domain *mem_domain = thread->mem_domain_info.mem_domain;
@@ -409,29 +440,42 @@ static void configure_dynamic_mpu_regions(struct k_thread *thread)
}
LOG_DBG("set region 0x%lx 0x%lx",
partition->start, partition->size);
region_num = insert_region(dyn_regions,
region_num,
max_region_num,
partition->start,
partition->size,
&partition->attr);
ret2 = insert_region(dyn_regions,
region_num,
max_region_num,
partition->start,
partition->size,
&partition->attr);
CHECKIF(ret2 != 0) {
ret = ret2;
}
region_num = (uint8_t)ret2;
}
}
LOG_DBG("configure user thread %p's context", thread);
if ((thread->base.user_options & K_USER) != 0) {
/* K_USER thread stack needs a region */
region_num = insert_region(dyn_regions,
region_num,
max_region_num,
thread->stack_info.start,
thread->stack_info.size,
&K_MEM_PARTITION_P_RW_U_RW);
ret2 = insert_region(dyn_regions,
region_num,
max_region_num,
thread->stack_info.start,
thread->stack_info.size,
&K_MEM_PARTITION_P_RW_U_RW);
CHECKIF(ret2 != 0) {
ret = ret2;
}
region_num = (uint8_t)ret2;
}
arm_core_mpu_disable();
flush_dynamic_regions_to_mpu(dyn_regions, region_num);
ret = flush_dynamic_regions_to_mpu(dyn_regions, region_num);
arm_core_mpu_enable();
out:
return ret;
}
int arch_mem_domain_max_partitions_get(void)
@@ -445,22 +489,28 @@ int arch_mem_domain_max_partitions_get(void)
return max_parts;
}
void arch_mem_domain_partition_add(struct k_mem_domain *domain, uint32_t partition_id)
int arch_mem_domain_partition_add(struct k_mem_domain *domain, uint32_t partition_id)
{
ARG_UNUSED(domain);
ARG_UNUSED(partition_id);
return 0;
}
void arch_mem_domain_partition_remove(struct k_mem_domain *domain, uint32_t partition_id)
int arch_mem_domain_partition_remove(struct k_mem_domain *domain, uint32_t partition_id)
{
ARG_UNUSED(domain);
ARG_UNUSED(partition_id);
return 0;
}
void arch_mem_domain_thread_add(struct k_thread *thread)
int arch_mem_domain_thread_add(struct k_thread *thread)
{
int ret = 0;
if (thread == _current) {
configure_dynamic_mpu_regions(thread);
ret = configure_dynamic_mpu_regions(thread);
}
#ifdef CONFIG_SMP
else {
@@ -468,12 +518,16 @@ void arch_mem_domain_thread_add(struct k_thread *thread)
z_arm64_mem_cfg_ipi();
}
#endif
return ret;
}
void arch_mem_domain_thread_remove(struct k_thread *thread)
int arch_mem_domain_thread_remove(struct k_thread *thread)
{
int ret = 0;
if (thread == _current) {
configure_dynamic_mpu_regions(thread);
ret = configure_dynamic_mpu_regions(thread);
}
#ifdef CONFIG_SMP
else {
@@ -481,6 +535,8 @@ void arch_mem_domain_thread_remove(struct k_thread *thread)
z_arm64_mem_cfg_ipi();
}
#endif
return ret;
}
void z_arm64_thread_mem_domains_init(struct k_thread *thread)

arch/arm64/core/mmu.c (56 changed lines)

@@ -1006,8 +1006,8 @@ int arch_mem_domain_init(struct k_mem_domain *domain)
return 0;
}
static void private_map(struct arm_mmu_ptables *ptables, const char *name,
uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs)
static int private_map(struct arm_mmu_ptables *ptables, const char *name,
uintptr_t phys, uintptr_t virt, size_t size, uint32_t attrs)
{
int ret;
@@ -1018,10 +1018,12 @@ static void private_map(struct arm_mmu_ptables *ptables, const char *name,
if (is_ptable_active(ptables)) {
invalidate_tlb_all();
}
return ret;
}
static void reset_map(struct arm_mmu_ptables *ptables, const char *name,
uintptr_t addr, size_t size)
static int reset_map(struct arm_mmu_ptables *ptables, const char *name,
uintptr_t addr, size_t size)
{
int ret;
@@ -1030,40 +1032,44 @@ static void reset_map(struct arm_mmu_ptables *ptables, const char *name,
if (is_ptable_active(ptables)) {
invalidate_tlb_all();
}
return ret;
}
void arch_mem_domain_partition_add(struct k_mem_domain *domain,
uint32_t partition_id)
int arch_mem_domain_partition_add(struct k_mem_domain *domain,
uint32_t partition_id)
{
struct arm_mmu_ptables *domain_ptables = &domain->arch.ptables;
struct k_mem_partition *ptn = &domain->partitions[partition_id];
private_map(domain_ptables, "partition", ptn->start, ptn->start,
ptn->size, ptn->attr.attrs | MT_NORMAL);
return private_map(domain_ptables, "partition", ptn->start, ptn->start,
ptn->size, ptn->attr.attrs | MT_NORMAL);
}
void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
uint32_t partition_id)
int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
uint32_t partition_id)
{
struct arm_mmu_ptables *domain_ptables = &domain->arch.ptables;
struct k_mem_partition *ptn = &domain->partitions[partition_id];
reset_map(domain_ptables, "partition removal", ptn->start, ptn->size);
return reset_map(domain_ptables, "partition removal",
ptn->start, ptn->size);
}
static void map_thread_stack(struct k_thread *thread,
struct arm_mmu_ptables *ptables)
static int map_thread_stack(struct k_thread *thread,
struct arm_mmu_ptables *ptables)
{
private_map(ptables, "thread_stack", thread->stack_info.start,
thread->stack_info.start, thread->stack_info.size,
MT_P_RW_U_RW | MT_NORMAL);
return private_map(ptables, "thread_stack", thread->stack_info.start,
thread->stack_info.start, thread->stack_info.size,
MT_P_RW_U_RW | MT_NORMAL);
}
void arch_mem_domain_thread_add(struct k_thread *thread)
int arch_mem_domain_thread_add(struct k_thread *thread)
{
struct arm_mmu_ptables *old_ptables, *domain_ptables;
struct k_mem_domain *domain;
bool is_user, is_migration;
int ret = 0;
domain = thread->mem_domain_info.mem_domain;
domain_ptables = &domain->arch.ptables;
@@ -1073,7 +1079,7 @@ void arch_mem_domain_thread_add(struct k_thread *thread)
is_migration = (old_ptables != NULL) && is_user;
if (is_migration) {
map_thread_stack(thread, domain_ptables);
ret = map_thread_stack(thread, domain_ptables);
}
thread->arch.ptables = domain_ptables;
@@ -1089,12 +1095,14 @@ void arch_mem_domain_thread_add(struct k_thread *thread)
}
if (is_migration) {
reset_map(old_ptables, __func__, thread->stack_info.start,
ret = reset_map(old_ptables, __func__, thread->stack_info.start,
thread->stack_info.size);
}
return ret;
}
void arch_mem_domain_thread_remove(struct k_thread *thread)
int arch_mem_domain_thread_remove(struct k_thread *thread)
{
struct arm_mmu_ptables *domain_ptables;
struct k_mem_domain *domain;
@@ -1103,15 +1111,15 @@ void arch_mem_domain_thread_remove(struct k_thread *thread)
domain_ptables = &domain->arch.ptables;
if ((thread->base.user_options & K_USER) == 0) {
return;
return 0;
}
if ((thread->base.thread_state & _THREAD_DEAD) == 0) {
return;
return 0;
}
reset_map(domain_ptables, __func__, thread->stack_info.start,
thread->stack_info.size);
return reset_map(domain_ptables, __func__, thread->stack_info.start,
thread->stack_info.size);
}
static void z_arm64_swap_ptables(struct k_thread *incoming)

arch/riscv/core/pmp/core_pmp.c (73 changed lines)

@@ -7,10 +7,14 @@
#include <kernel.h>
#include <kernel_internal.h>
#include <sys/__assert.h>
#include <sys/check.h>
#include "core_pmp.h"
#include <arch/riscv/csr.h>
#include <stdio.h>
#include <logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#define PMP_SLOT_NUMBER CONFIG_PMP_SLOT
#ifdef CONFIG_USERSPACE
@@ -284,17 +288,21 @@ void z_riscv_configure_user_allowed_stack(struct k_thread *thread)
csr_write_enum(CSR_PMPCFG0 + i, thread->arch.u_pmpcfg[i]);
}
void z_riscv_pmp_add_dynamic(struct k_thread *thread,
int z_riscv_pmp_add_dynamic(struct k_thread *thread,
ulong_t addr,
ulong_t size,
unsigned char flags)
{
unsigned char index = 0U;
unsigned char *uchar_pmpcfg;
int ret = 0;
/* Check 4 bytes alignment */
__ASSERT(((addr & 0x3) == 0) && ((size & 0x3) == 0) && size,
"address/size are not 4 bytes aligned\n");
CHECKIF(!(((addr & 0x3) == 0) && ((size & 0x3) == 0) && size)) {
LOG_ERR("address/size are not 4 bytes aligned\n");
ret = -EINVAL;
goto out;
}
/* Get next free entry */
uchar_pmpcfg = (unsigned char *) thread->arch.u_pmpcfg;
@@ -306,6 +314,10 @@ void z_riscv_pmp_add_dynamic(struct k_thread *thread,
}
__ASSERT((index < CONFIG_PMP_SLOT), "no free PMP entry\n");
CHECKIF(!(index < CONFIG_PMP_SLOT)) {
ret = -ENOSPC;
goto out;
}
/* Select the best type */
if (size == 4) {
@@ -316,6 +328,11 @@ void z_riscv_pmp_add_dynamic(struct k_thread *thread,
else if ((addr & (size - 1)) || (size & (size - 1))) {
__ASSERT(((index + 1) < CONFIG_PMP_SLOT),
"not enough free PMP entries\n");
CHECKIF(!((index + 1) < CONFIG_PMP_SLOT)) {
ret = -ENOSPC;
goto out;
}
thread->arch.u_pmpaddr[index] = TO_PMP_ADDR(addr);
uchar_pmpcfg[index++] = flags | PMP_NA4;
thread->arch.u_pmpaddr[index] = TO_PMP_ADDR(addr + size);
@@ -326,6 +343,9 @@ void z_riscv_pmp_add_dynamic(struct k_thread *thread,
thread->arch.u_pmpaddr[index] = TO_PMP_NAPOT(addr, size);
uchar_pmpcfg[index] = flags | PMP_NAPOT;
}
out:
return ret;
}
int arch_buffer_validate(void *addr, size_t size, int write)
@@ -413,8 +433,8 @@ int arch_mem_domain_max_partitions_get(void)
return PMP_MAX_DYNAMIC_REGION;
}
void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
uint32_t partition_id)
int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
uint32_t partition_id)
{
sys_dnode_t *node, *next_node;
uint32_t index, i, num;
@@ -423,6 +443,7 @@ void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
struct k_thread *thread;
ulong_t size = (ulong_t) domain->partitions[partition_id].size;
ulong_t start = (ulong_t) domain->partitions[partition_id].start;
int ret = 0;
if (size == 4) {
pmp_type = PMP_NA4;
@@ -444,7 +465,8 @@ void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
node = sys_dlist_peek_head(&domain->mem_domain_q);
if (!node) {
return;
ret = -ENOENT;
goto out;
}
thread = CONTAINER_OF(node, struct k_thread, mem_domain_info);
@@ -459,7 +481,11 @@ void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
}
}
__ASSERT((index < CONFIG_PMP_SLOT), "partition not found\n");
CHECKIF(!(index < CONFIG_PMP_SLOT)) {
LOG_DBG("%s: partition not found\n", __func__);
ret = -ENOENT;
goto out;
}
#if !defined(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT) || defined(CONFIG_PMP_STACK_GUARD)
if (pmp_type == PMP_TOR) {
@@ -483,11 +509,15 @@ void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
uchar_pmpcfg[CONFIG_PMP_SLOT - 2] = 0U;
}
}
out:
return ret;
}
void arch_mem_domain_thread_add(struct k_thread *thread)
int arch_mem_domain_thread_add(struct k_thread *thread)
{
struct k_mem_partition *partition;
int ret = 0, ret2;
for (int i = 0, pcount = 0;
pcount < thread->mem_domain_info.mem_domain->num_partitions;
@@ -498,29 +528,44 @@ void arch_mem_domain_thread_add(struct k_thread *thread)
}
pcount++;
z_riscv_pmp_add_dynamic(thread, (ulong_t) partition->start,
ret2 = z_riscv_pmp_add_dynamic(thread,
(ulong_t) partition->start,
(ulong_t) partition->size, partition->attr.pmp_attr);
ARG_UNUSED(ret2);
CHECKIF(ret2 != 0) {
ret = ret2;
}
}
return ret;
}
void arch_mem_domain_partition_add(struct k_mem_domain *domain,
uint32_t partition_id)
int arch_mem_domain_partition_add(struct k_mem_domain *domain,
uint32_t partition_id)
{
sys_dnode_t *node, *next_node;
struct k_thread *thread;
struct k_mem_partition *partition;
int ret = 0, ret2;
partition = &domain->partitions[partition_id];
SYS_DLIST_FOR_EACH_NODE_SAFE(&domain->mem_domain_q, node, next_node) {
thread = CONTAINER_OF(node, struct k_thread, mem_domain_info);
z_riscv_pmp_add_dynamic(thread, (ulong_t) partition->start,
ret2 = z_riscv_pmp_add_dynamic(thread,
(ulong_t) partition->start,
(ulong_t) partition->size, partition->attr.pmp_attr);
ARG_UNUSED(ret2);
CHECKIF(ret2 != 0) {
ret = ret2;
}
}
return ret;
}
void arch_mem_domain_thread_remove(struct k_thread *thread)
int arch_mem_domain_thread_remove(struct k_thread *thread)
{
uint32_t i;
unsigned char *uchar_pmpcfg;
@@ -530,6 +575,8 @@ void arch_mem_domain_thread_remove(struct k_thread *thread)
for (i = PMP_REGION_NUM_FOR_U_THREAD; i < CONFIG_PMP_SLOT; i++) {
uchar_pmpcfg[i] = 0U;
}
return 0;
}
#endif /* CONFIG_USERSPACE */

arch/riscv/include/core_pmp.h (6 changed lines)

@@ -94,8 +94,12 @@ void z_riscv_configure_user_allowed_stack(struct k_thread *thread);
* @param addr Start address of the memory area.
* @param size Size of the memory area.
* @param flags Permissions: PMP_R, PMP_W, PMP_X, PMP_L
*
* @retval 0 if successful
* @retval -EINVAL if invalid parameters supplied
* @retval -ENOSPC if no free PMP entry
*/
void z_riscv_pmp_add_dynamic(struct k_thread *thread,
int z_riscv_pmp_add_dynamic(struct k_thread *thread,
ulong_t addr,
ulong_t size,
unsigned char flags);
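
As a usage note for the return values documented above, the following hedged
sketch shows a caller checking them; grant_region() is invented for
illustration, while z_riscv_pmp_add_dynamic(), PMP_R and PMP_W come from this
header and the PMP code.

    #include <kernel.h>
    #include <errno.h>
    #include "core_pmp.h"

    /* Illustrative only: grant a thread access to a 4-byte-aligned region
     * and propagate the error instead of relying on an assertion.
     */
    static int grant_region(struct k_thread *thread, ulong_t start, ulong_t size)
    {
        int ret = z_riscv_pmp_add_dynamic(thread, start, size, PMP_R | PMP_W);

        if (ret == -EINVAL) {
            /* start/size not 4-byte aligned, or size == 0 */
        } else if (ret == -ENOSPC) {
            /* no free PMP slot left for this thread */
        }

        return ret;
    }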

arch/x86/core/x86_mmu.c (295 changed lines)

@@ -9,6 +9,7 @@
#include <arch/x86/mmustructs.h>
#include <sys/mem_manage.h>
#include <sys/__assert.h>
#include <sys/check.h>
#include <logging/log.h>
#include <errno.h>
#include <ctype.h>
@@ -452,6 +453,16 @@ static inline void assert_addr_aligned(uintptr_t addr)
#endif
}
__pinned_func
static inline bool is_addr_aligned(uintptr_t addr)
{
if ((addr & (CONFIG_MMU_PAGE_SIZE - 1)) == 0U) {
return true;
} else {
return false;
}
}
__pinned_func
static inline void assert_virt_addr_aligned(void *addr)
{
@@ -459,15 +470,47 @@ static inline void assert_virt_addr_aligned(void *addr)
}
__pinned_func
static inline void assert_region_page_aligned(void *addr, size_t size)
static inline bool is_virt_addr_aligned(void *addr)
{
return is_addr_aligned((uintptr_t)addr);
}
__pinned_func
static inline void assert_size_aligned(size_t size)
{
assert_virt_addr_aligned(addr);
#if __ASSERT_ON
__ASSERT((size & (CONFIG_MMU_PAGE_SIZE - 1)) == 0U,
"unaligned size %zu", size);
#endif
}
__pinned_func
static inline bool is_size_aligned(size_t size)
{
if ((size & (CONFIG_MMU_PAGE_SIZE - 1)) == 0U) {
return true;
} else {
return false;
}
}
__pinned_func
static inline void assert_region_page_aligned(void *addr, size_t size)
{
assert_virt_addr_aligned(addr);
assert_size_aligned(size);
}
__pinned_func
static inline bool is_region_page_aligned(void *addr, size_t size)
{
if (!is_virt_addr_aligned(addr)) {
return false;
}
return is_size_aligned(size);
}
/*
* Debug functions. All conditionally compiled with CONFIG_EXCEPTION_DEBUG.
*/
@@ -946,13 +989,17 @@ static inline pentry_t pte_atomic_update(pentry_t *pte, pentry_t update_val,
* @param mask What bits to update in the PTE (ignored if OPTION_RESET or
* OPTION_CLEAR)
* @param options Control options, described above
*
* @retval 0 if successful
* @retval -EFAULT if large page encountered or missing page table level
*/
__pinned_func
static void page_map_set(pentry_t *ptables, void *virt, pentry_t entry_val,
pentry_t *old_val_ptr, pentry_t mask, uint32_t options)
static int page_map_set(pentry_t *ptables, void *virt, pentry_t entry_val,
pentry_t *old_val_ptr, pentry_t mask, uint32_t options)
{
pentry_t *table = ptables;
bool flush = (options & OPTION_FLUSH) != 0U;
int ret = 0;
for (int level = 0; level < NUM_LEVELS; level++) {
int index;
@@ -971,20 +1018,40 @@ static void page_map_set(pentry_t *ptables, void *virt, pentry_t entry_val,
break;
}
/* We fail an assertion here due to no support for
/* We bail out early here due to no support for
* splitting existing bigpage mappings.
* If the PS bit is not supported at some level (like
* in a PML4 entry) it is always reserved and must be 0
*/
__ASSERT((*entryp & MMU_PS) == 0U, "large page encountered");
CHECKIF(!((*entryp & MMU_PS) == 0U)) {
/* Cannot continue since we cannot split
* bigpage mappings.
*/
LOG_ERR("large page encountered");
ret = -EFAULT;
goto out;
}
table = next_table(*entryp, level);
__ASSERT(table != NULL,
"missing page table level %d when trying to map %p",
level + 1, virt);
CHECKIF(!(table != NULL)) {
/* Cannot continue since table is NULL,
* and it cannot be dereferenced in next loop
* iteration.
*/
LOG_ERR("missing page table level %d when trying to map %p",
level + 1, virt);
ret = -EFAULT;
goto out;
}
}
out:
if (flush) {
tlb_flush_page(virt);
}
return ret;
}
/**
@@ -1012,20 +1079,30 @@ static void page_map_set(pentry_t *ptables, void *virt, pentry_t entry_val,
* @param mask What bits to update in each PTE. Un-set bits will never be
* modified. Ignored if OPTION_RESET or OPTION_CLEAR.
* @param options Control options, described above
*
* @retval 0 if successful
* @retval -EINVAL if invalid parameters are supplied
* @retval -EFAULT if errors encountered when updating page tables
*/
__pinned_func
static void range_map_ptables(pentry_t *ptables, void *virt, uintptr_t phys,
size_t size, pentry_t entry_flags, pentry_t mask,
uint32_t options)
static int range_map_ptables(pentry_t *ptables, void *virt, uintptr_t phys,
size_t size, pentry_t entry_flags, pentry_t mask,
uint32_t options)
{
bool zero_entry = (options & (OPTION_RESET | OPTION_CLEAR)) != 0U;
int ret = 0, ret2;
assert_addr_aligned(phys);
__ASSERT((size & (CONFIG_MMU_PAGE_SIZE - 1)) == 0U,
"unaligned size %zu", size);
__ASSERT((entry_flags & paging_levels[0].mask) == 0U,
"entry_flags " PRI_ENTRY " overlaps address area",
entry_flags);
CHECKIF(!is_addr_aligned(phys) || !is_size_aligned(size)) {
ret = -EINVAL;
goto out;
}
CHECKIF(!((entry_flags & paging_levels[0].mask) == 0U)) {
LOG_ERR("entry_flags " PRI_ENTRY " overlaps address area",
entry_flags);
ret = -EINVAL;
goto out;
}
/* This implementation is stack-efficient but not particularly fast.
* We do a full page table walk for every page we are updating.
@@ -1041,9 +1118,16 @@ static void range_map_ptables(pentry_t *ptables, void *virt, uintptr_t phys,
entry_val = (pentry_t)(phys + offset) | entry_flags;
}
page_map_set(ptables, dest_virt, entry_val, NULL, mask,
options);
ret2 = page_map_set(ptables, dest_virt, entry_val, NULL, mask,
options);
ARG_UNUSED(ret2);
CHECKIF(ret2 != 0) {
ret = ret2;
}
}
out:
return ret;
}
/**
@@ -1067,11 +1151,17 @@ static void range_map_ptables(pentry_t *ptables, void *virt, uintptr_t phys,
* be preserved. Ignored if OPTION_RESET.
* @param options Control options. Do not set OPTION_USER here. OPTION_FLUSH
* will trigger a TLB shootdown after all tables are updated.
*
* @retval 0 if successful
* @retval -EINVAL if invalid parameters are supplied
* @retval -EFAULT if errors encountered when updating page tables
*/
__pinned_func
static void range_map(void *virt, uintptr_t phys, size_t size,
pentry_t entry_flags, pentry_t mask, uint32_t options)
static int range_map(void *virt, uintptr_t phys, size_t size,
pentry_t entry_flags, pentry_t mask, uint32_t options)
{
int ret = 0, ret2;
LOG_DBG("%s: %p -> %p (%zu) flags " PRI_ENTRY " mask "
PRI_ENTRY " opt 0x%x", __func__, (void *)phys, virt, size,
entry_flags, mask, options);
@@ -1086,7 +1176,11 @@ static void range_map(void *virt, uintptr_t phys, size_t size,
virt, size);
#endif /* CONFIG_X86_64 */
__ASSERT((options & OPTION_USER) == 0U, "invalid option for function");
CHECKIF(!((options & OPTION_USER) == 0U)) {
LOG_ERR("invalid option for mapping");
ret = -EINVAL;
goto out;
}
/* All virtual-to-physical mappings are the same in all page tables.
* What can differ is only access permissions, defined by the memory
@@ -1102,30 +1196,46 @@ static void range_map(void *virt, uintptr_t phys, size_t size,
struct arch_mem_domain *domain =
CONTAINER_OF(node, struct arch_mem_domain, node);
range_map_ptables(domain->ptables, virt, phys, size,
entry_flags, mask, options | OPTION_USER);
ret2 = range_map_ptables(domain->ptables, virt, phys, size,
entry_flags, mask,
options | OPTION_USER);
ARG_UNUSED(ret2);
CHECKIF(ret2 != 0) {
ret = ret2;
}
}
#endif /* CONFIG_USERSPACE */
range_map_ptables(z_x86_kernel_ptables, virt, phys, size, entry_flags,
mask, options);
ret2 = range_map_ptables(z_x86_kernel_ptables, virt, phys, size,
entry_flags, mask, options);
ARG_UNUSED(ret2);
CHECKIF(ret2 != 0) {
ret = ret2;
}
out:
#ifdef CONFIG_SMP
if ((options & OPTION_FLUSH) != 0U) {
tlb_shootdown();
}
#endif /* CONFIG_SMP */
return ret;
}
__pinned_func
static inline void range_map_unlocked(void *virt, uintptr_t phys, size_t size,
pentry_t entry_flags, pentry_t mask,
uint32_t options)
static inline int range_map_unlocked(void *virt, uintptr_t phys, size_t size,
pentry_t entry_flags, pentry_t mask,
uint32_t options)
{
k_spinlock_key_t key;
int ret;
key = k_spin_lock(&x86_mmu_lock);
range_map(virt, phys, size, entry_flags, mask, options);
ret = range_map(virt, phys, size, entry_flags, mask, options);
k_spin_unlock(&x86_mmu_lock, key);
return ret;
}
__pinned_func
@@ -1171,15 +1281,23 @@ static pentry_t flags_to_entry(uint32_t flags)
__pinned_func
void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
{
range_map_unlocked(virt, phys, size, flags_to_entry(flags),
MASK_ALL, 0);
int ret;
ret = range_map_unlocked(virt, phys, size, flags_to_entry(flags),
MASK_ALL, 0);
__ASSERT_NO_MSG(ret == 0);
ARG_UNUSED(ret);
}
/* unmap region addr..addr+size, reset entries and flush TLB */
void arch_mem_unmap(void *addr, size_t size)
{
range_map_unlocked((void *)addr, 0, size, 0, 0,
OPTION_FLUSH | OPTION_CLEAR);
int ret;
ret = range_map_unlocked((void *)addr, 0, size, 0, 0,
OPTION_FLUSH | OPTION_CLEAR);
__ASSERT_NO_MSG(ret == 0);
ARG_UNUSED(ret);
}
#ifdef Z_VM_KERNEL
@@ -1241,6 +1359,8 @@ void z_x86_mmu_init(void)
__pinned_func
void z_x86_set_stack_guard(k_thread_stack_t *stack)
{
int ret;
/* Applied to all page tables as this affects supervisor mode.
* XXX: This never gets reset when the thread exits, which can
* cause problems if the memory is later used for something else.
@@ -1249,8 +1369,10 @@ void z_x86_set_stack_guard(k_thread_stack_t *stack)
* Guard page is always the first page of the stack object for both
* kernel and thread stacks.
*/
range_map_unlocked(stack, 0, CONFIG_MMU_PAGE_SIZE,
MMU_P | ENTRY_XD, MASK_PERM, OPTION_FLUSH);
ret = range_map_unlocked(stack, 0, CONFIG_MMU_PAGE_SIZE,
MMU_P | ENTRY_XD, MASK_PERM, OPTION_FLUSH);
__ASSERT_NO_MSG(ret == 0);
ARG_UNUSED(ret);
}
#endif /* CONFIG_X86_STACK_PROTECTION */
@@ -1354,17 +1476,17 @@ int arch_buffer_validate(void *addr, size_t size, int write)
*/
__pinned_func
static inline void reset_region(uintptr_t start, size_t size)
static inline int reset_region(uintptr_t start, size_t size)
{
range_map_unlocked((void *)start, 0, size, 0, 0,
OPTION_FLUSH | OPTION_RESET);
return range_map_unlocked((void *)start, 0, size, 0, 0,
OPTION_FLUSH | OPTION_RESET);
}
__pinned_func
static inline void apply_region(uintptr_t start, size_t size, pentry_t attr)
static inline int apply_region(uintptr_t start, size_t size, pentry_t attr)
{
range_map_unlocked((void *)start, 0, size, attr, MASK_PERM,
OPTION_FLUSH);
return range_map_unlocked((void *)start, 0, size, attr, MASK_PERM,
OPTION_FLUSH);
}
/* Cache of the current memory domain applied to the common page tables and
@@ -1448,44 +1570,46 @@ out_unlock:
* page tables.
*/
__pinned_func
void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
uint32_t partition_id)
{
struct k_mem_partition *ptn;
if (domain != current_domain) {
return;
return 0;
}
ptn = &domain->partitions[partition_id];
reset_region(ptn->start, ptn->size);
return reset_region(ptn->start, ptn->size);
}
__pinned_func
void arch_mem_domain_partition_add(struct k_mem_domain *domain,
int arch_mem_domain_partition_add(struct k_mem_domain *domain,
uint32_t partition_id)
{
struct k_mem_partition *ptn;
if (domain != current_domain) {
return;
return 0;
}
ptn = &domain->partitions[partition_id];
apply_region(ptn->start, ptn->size, ptn->attr);
return apply_region(ptn->start, ptn->size, ptn->attr);
}
/* Rest of the APIs don't need to do anything */
__pinned_func
void arch_mem_domain_thread_add(struct k_thread *thread)
int arch_mem_domain_thread_add(struct k_thread *thread)
{
return 0;
}
__pinned_func
void arch_mem_domain_thread_remove(struct k_thread *thread)
int arch_mem_domain_thread_remove(struct k_thread *thread)
{
return 0;
}
#else
/* Memory domains each have a set of page tables assigned to them */
@@ -1605,10 +1729,11 @@ static int copy_page_table(pentry_t *dst, pentry_t *src, int level)
}
__pinned_func
static void region_map_update(pentry_t *ptables, void *start,
static int region_map_update(pentry_t *ptables, void *start,
size_t size, pentry_t flags, bool reset)
{
uint32_t options = OPTION_USER;
int ret;
k_spinlock_key_t key;
if (reset) {
@@ -1619,29 +1744,31 @@ static void region_map_update(pentry_t *ptables, void *start,
}
key = k_spin_lock(&x86_mmu_lock);
(void)range_map_ptables(ptables, start, 0, size, flags, MASK_PERM,
ret = range_map_ptables(ptables, start, 0, size, flags, MASK_PERM,
options);
k_spin_unlock(&x86_mmu_lock, key);
#ifdef CONFIG_SMP
tlb_shootdown();
#endif
return ret;
}
__pinned_func
static inline void reset_region(pentry_t *ptables, void *start, size_t size)
static inline int reset_region(pentry_t *ptables, void *start, size_t size)
{
LOG_DBG("%s(%p, %p, %zu)", __func__, ptables, start, size);
region_map_update(ptables, start, size, 0, true);
return region_map_update(ptables, start, size, 0, true);
}
__pinned_func
static inline void apply_region(pentry_t *ptables, void *start,
static inline int apply_region(pentry_t *ptables, void *start,
size_t size, pentry_t attr)
{
LOG_DBG("%s(%p, %p, %zu, " PRI_ENTRY ")", __func__, ptables, start,
size, attr);
region_map_update(ptables, start, size, attr, false);
return region_map_update(ptables, start, size, attr, false);
}
__pinned_func
@@ -1720,23 +1847,23 @@ int arch_mem_domain_init(struct k_mem_domain *domain)
return ret;
}
void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
uint32_t partition_id)
int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
uint32_t partition_id)
{
struct k_mem_partition *partition = &domain->partitions[partition_id];
/* Reset the partition's region back to defaults */
reset_region(domain->arch.ptables, (void *)partition->start,
partition->size);
return reset_region(domain->arch.ptables, (void *)partition->start,
partition->size);
}
/* Called on thread exit or when moving it to a different memory domain */
void arch_mem_domain_thread_remove(struct k_thread *thread)
int arch_mem_domain_thread_remove(struct k_thread *thread)
{
struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;
if ((thread->base.user_options & K_USER) == 0) {
return;
return 0;
}
if ((thread->base.thread_state & _THREAD_DEAD) == 0) {
@@ -1745,31 +1872,34 @@ void arch_mem_domain_thread_remove(struct k_thread *thread)
* z_thread_abort(). Resetting the stack region will
* take place in the forthcoming thread_add() call.
*/
return;
return 0;
}
/* Restore permissions on the thread's stack area since it is no
* longer a member of the domain.
*/
reset_region(domain->arch.ptables, (void *)thread->stack_info.start,
thread->stack_info.size);
return reset_region(domain->arch.ptables,
(void *)thread->stack_info.start,
thread->stack_info.size);
}
__pinned_func
void arch_mem_domain_partition_add(struct k_mem_domain *domain,
int arch_mem_domain_partition_add(struct k_mem_domain *domain,
uint32_t partition_id)
{
struct k_mem_partition *partition = &domain->partitions[partition_id];
/* Update the page tables with the partition info */
apply_region(domain->arch.ptables, (void *)partition->start,
partition->size, partition->attr | MMU_P);
return apply_region(domain->arch.ptables, (void *)partition->start,
partition->size, partition->attr | MMU_P);
}
/* Invoked from memory domain API calls, as well as during thread creation */
__pinned_func
void arch_mem_domain_thread_add(struct k_thread *thread)
int arch_mem_domain_thread_add(struct k_thread *thread)
{
int ret = 0;
/* New memory domain we are being added to */
struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;
/* This is only set for threads that were migrating from some other
@@ -1804,8 +1934,9 @@ void arch_mem_domain_thread_add(struct k_thread *thread)
* See #29601
*/
if (is_migration) {
reset_region(old_ptables, (void *)thread->stack_info.start,
thread->stack_info.size);
ret = reset_region(old_ptables,
(void *)thread->stack_info.start,
thread->stack_info.size);
}
#if !defined(CONFIG_X86_KPTI) && !defined(CONFIG_X86_COMMON_PAGE_TABLE)
@@ -1818,6 +1949,8 @@ void arch_mem_domain_thread_add(struct k_thread *thread)
z_x86_cr3_set(thread->arch.ptables);
}
#endif /* CONFIG_X86_KPTI */
return ret;
}
#endif /* !CONFIG_X86_COMMON_PAGE_TABLE */
@@ -1947,22 +2080,28 @@ int arch_page_phys_get(void *virt, uintptr_t *phys)
__pinned_func
void arch_mem_page_out(void *addr, uintptr_t location)
{
int ret;
pentry_t mask = PTE_MASK | MMU_P | MMU_A;
/* Accessed bit set to guarantee the entry is not completely 0 in
* case of location value 0. A totally 0 PTE is un-mapped.
*/
range_map(addr, location, CONFIG_MMU_PAGE_SIZE, MMU_A, mask,
OPTION_FLUSH);
ret = range_map(addr, location, CONFIG_MMU_PAGE_SIZE, MMU_A, mask,
OPTION_FLUSH);
__ASSERT_NO_MSG(ret == 0);
ARG_UNUSED(ret);
}
__pinned_func
void arch_mem_page_in(void *addr, uintptr_t phys)
{
int ret;
pentry_t mask = PTE_MASK | MMU_P | MMU_D | MMU_A;
range_map(addr, phys, CONFIG_MMU_PAGE_SIZE, MMU_P, mask,
OPTION_FLUSH);
ret = range_map(addr, phys, CONFIG_MMU_PAGE_SIZE, MMU_P, mask,
OPTION_FLUSH);
__ASSERT_NO_MSG(ret == 0);
ARG_UNUSED(ret);
}
__pinned_func

include/sys/arch_interface.h (27 changed lines)

@@ -581,8 +581,13 @@ int arch_mem_domain_init(struct k_mem_domain *domain);
* thread is not already a member of this domain.
*
* @param thread Thread which needs to be configured.
*
* @retval 0 if successful
* @retval -EINVAL if invalid parameters supplied
* @retval -ENOSPC if running out of space in internal structures
* (e.g. translation tables)
*/
void arch_mem_domain_thread_add(struct k_thread *thread);
int arch_mem_domain_thread_add(struct k_thread *thread);
/**
* @brief Remove a thread from a memory domain (arch-specific)
@@ -594,8 +599,11 @@ void arch_mem_domain_thread_add(struct k_thread *thread);
* is being removed from.
*
* @param thread Thread being removed from its memory domain
*
* @retval 0 if successful
* @retval -EINVAL if invalid parameters supplied
*/
void arch_mem_domain_thread_remove(struct k_thread *thread);
int arch_mem_domain_thread_remove(struct k_thread *thread);
/**
* @brief Remove a partition from the memory domain (arch-specific)
@@ -609,9 +617,13 @@ void arch_mem_domain_thread_remove(struct k_thread *thread);
*
* @param domain The memory domain structure
* @param partition_id The partition index that needs to be deleted
*
* @retval 0 if successful
* @retval -EINVAL if invalid parameters supplied
* @retval -ENOENT if no matching partition found
*/
void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
uint32_t partition_id);
int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
uint32_t partition_id);
/**
* @brief Add a partition to the memory domain
@@ -621,9 +633,12 @@ void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
*
* @param domain The memory domain structure
* @param partition_id The partition that needs to be added
*
* @retval 0 if successful
* @retval -EINVAL if invalid parameters supplied
*/
void arch_mem_domain_partition_add(struct k_mem_domain *domain,
uint32_t partition_id);
int arch_mem_domain_partition_add(struct k_mem_domain *domain,
uint32_t partition_id);
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
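
To close, a hedged caller-side sketch of the updated contract:
add_partition_checked() is hypothetical and simply shows that failures from
the arch hooks can now be observed and propagated rather than lost;
arch_mem_domain_partition_add() is the real API declared above.

    #include <kernel.h>
    #include <sys/arch_interface.h>
    #include <sys/printk.h>

    /* Hypothetical caller: propagate an arch-layer failure now that the
     * hook returns an int instead of void.
     */
    static int add_partition_checked(struct k_mem_domain *domain,
                                     uint32_t partition_id)
    {
        int ret = arch_mem_domain_partition_add(domain, partition_id);

        if (ret != 0) {
            printk("arch_mem_domain_partition_add() failed (%d)\n", ret);
        }

        return ret;
    }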