diff --git a/arch/Kconfig b/arch/Kconfig
index cec29eb3c40..a800d220b58 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -91,6 +91,7 @@ config X86
 		   && !BOARD_HAS_TIMING_FUNCTIONS \
 		   && !SOC_HAS_TIMING_FUNCTIONS
 	select ARCH_HAS_STACK_CANARIES_TLS
+	select ARCH_SUPPORTS_MEM_MAPPED_STACKS if X86_MMU && !DEMAND_PAGING
 	help
 	  x86 architecture

diff --git a/arch/x86/core/Kconfig.ia32 b/arch/x86/core/Kconfig.ia32
index b24981c0606..d789c87eb80 100644
--- a/arch/x86/core/Kconfig.ia32
+++ b/arch/x86/core/Kconfig.ia32
@@ -67,6 +67,7 @@ config X86_STACK_PROTECTION
 	select SET_GDT
 	select GDT_DYNAMIC
 	select X86_ENABLE_TSS
+	imply THREAD_STACK_MEM_MAPPED if !DEMAND_PAGING
 	help
 	  This option leverages the MMU to cause a system fatal error if the
 	  bounds of the current process stack are overflowed. This is done
diff --git a/arch/x86/core/Kconfig.intel64 b/arch/x86/core/Kconfig.intel64
index 64021959034..913bb0e794d 100644
--- a/arch/x86/core/Kconfig.intel64
+++ b/arch/x86/core/Kconfig.intel64
@@ -73,6 +73,7 @@ config X86_STACK_PROTECTION
 	bool
 	default y if HW_STACK_PROTECTION
 	select THREAD_STACK_INFO
+	imply THREAD_STACK_MEM_MAPPED
 	help
 	  This option leverages the MMU to cause a system fatal error if the
 	  bounds of the current process stack are overflowed. This is done
diff --git a/arch/x86/core/fatal.c b/arch/x86/core/fatal.c
index bd5e3a51d91..370386d4af9 100644
--- a/arch/x86/core/fatal.c
+++ b/arch/x86/core/fatal.c
@@ -86,6 +86,40 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)
 }
 #endif

+#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
+/**
+ * Check if the fault is in the guard pages.
+ *
+ * @param addr Address to be tested.
+ *
+ * @return True if address is in guard pages, false otherwise.
+ */
+__pinned_func
+bool z_x86_check_guard_page(uintptr_t addr)
+{
+	struct k_thread *thread = _current;
+	uintptr_t start, end;
+
+	/* Front guard page - before thread stack area */
+	start = (uintptr_t)thread->stack_info.mapped.addr - CONFIG_MMU_PAGE_SIZE;
+	end = (uintptr_t)thread->stack_info.mapped.addr;
+
+	if ((addr >= start) && (addr < end)) {
+		return true;
+	}
+
+	/* Rear guard page - after thread stack area */
+	start = (uintptr_t)thread->stack_info.mapped.addr + thread->stack_info.mapped.sz;
+	end = start + CONFIG_MMU_PAGE_SIZE;
+
+	if ((addr >= start) && (addr < end)) {
+		return true;
+	}
+
+	return false;
+}
+#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
+
 #ifdef CONFIG_EXCEPTION_DEBUG

 static inline uintptr_t esf_get_code(const z_arch_esf_t *esf)
@@ -441,6 +475,14 @@ void z_x86_page_fault_handler(z_arch_esf_t *esf)
 		z_x86_fatal_error(K_ERR_STACK_CHK_FAIL, esf);
 	}
 #endif
+#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
+	void *fault_addr = z_x86_cr2_get();
+
+	if (z_x86_check_guard_page((uintptr_t)fault_addr)) {
+		z_x86_fatal_error(K_ERR_STACK_CHK_FAIL, esf);
+	}
+#endif
+
 	z_x86_fatal_error(K_ERR_CPU_EXCEPTION, esf);
 	CODE_UNREACHABLE;
 }
diff --git a/arch/x86/core/ia32/thread.c b/arch/x86/core/ia32/thread.c
index 7830298f72e..b639bb59fdf 100644
--- a/arch/x86/core/ia32/thread.c
+++ b/arch/x86/core/ia32/thread.c
@@ -79,7 +79,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	void *swap_entry;
 	struct _x86_initial_frame *initial_frame;

-#if CONFIG_X86_STACK_PROTECTION
+#if defined(CONFIG_X86_STACK_PROTECTION) && !defined(CONFIG_THREAD_STACK_MEM_MAPPED)
+	/* This unconditionally sets the first page of the stack as a guard page,
+	 * which is only needed if the stack is not memory mapped.
+	 */
 	z_x86_set_stack_guard(stack);
 #endif

diff --git a/arch/x86/core/intel64/thread.c b/arch/x86/core/intel64/thread.c
index e419a92d206..f26f25ab5f1 100644
--- a/arch/x86/core/intel64/thread.c
+++ b/arch/x86/core/intel64/thread.c
@@ -32,7 +32,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	void *switch_entry;
 	struct x86_initial_frame *iframe;

-#if CONFIG_X86_STACK_PROTECTION
+#if defined(CONFIG_X86_STACK_PROTECTION) && !defined(CONFIG_THREAD_STACK_MEM_MAPPED)
+	/* This unconditionally sets the first page of the stack as a guard page,
+	 * which is only needed if the stack is not memory mapped.
+	 */
 	z_x86_set_stack_guard(stack);
 #endif
 #ifdef CONFIG_USERSPACE
diff --git a/arch/x86/core/userspace.c b/arch/x86/core/userspace.c
index 7ad77166b0f..dbe40b2bda0 100644
--- a/arch/x86/core/userspace.c
+++ b/arch/x86/core/userspace.c
@@ -69,8 +69,13 @@ void z_x86_swap_update_page_tables(struct k_thread *incoming)
 void *z_x86_userspace_prepare_thread(struct k_thread *thread)
 {
 	void *initial_entry;
+
 	struct z_x86_thread_stack_header *header =
+#ifdef CONFIG_THREAD_STACK_MEM_MAPPED
+		(struct z_x86_thread_stack_header *)thread->stack_info.mapped.addr;
+#else
 		(struct z_x86_thread_stack_header *)thread->stack_obj;
+#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */

 	thread->arch.psp =
 		header->privilege_stack + sizeof(header->privilege_stack);
diff --git a/include/zephyr/arch/x86/thread_stack.h b/include/zephyr/arch/x86/thread_stack.h
index 6602d85784c..f22bf4d6853 100644
--- a/include/zephyr/arch/x86/thread_stack.h
+++ b/include/zephyr/arch/x86/thread_stack.h
@@ -14,16 +14,20 @@
 #define ARCH_STACK_PTR_ALIGN 4UL
 #endif

-#if defined(CONFIG_X86_STACK_PROTECTION) || defined(CONFIG_USERSPACE)
+#if defined(CONFIG_X86_STACK_PROTECTION) || defined(CONFIG_USERSPACE) \
+	|| defined(CONFIG_THREAD_STACK_MEM_MAPPED)
 #define Z_X86_STACK_BASE_ALIGN CONFIG_MMU_PAGE_SIZE
 #else
 #define Z_X86_STACK_BASE_ALIGN ARCH_STACK_PTR_ALIGN
 #endif

-#ifdef CONFIG_USERSPACE
+#if defined(CONFIG_USERSPACE) || defined(CONFIG_THREAD_STACK_MEM_MAPPED)
 /* If user mode enabled, expand any stack size to fill a page since that is
  * the access control granularity and we don't want other kernel data to
  * unintentionally fall in the latter part of the page
+ *
+ * This is also true when memory mapped stacks are used, since
+ * access control applies to one page at a time.
  */
 #define Z_X86_STACK_SIZE_ALIGN CONFIG_MMU_PAGE_SIZE
 #else
@@ -34,17 +38,38 @@
 /* With both hardware stack protection and userspace enabled, stacks are
  * arranged as follows:
  *
+ * --- Without stack being memory mapped:
  * High memory addresses
  * +-----------------------------------------+
  * | Thread stack (varies)                   |
  * +-----------------------------------------+
  * | Privilege elevation stack               |
- * | (4096 bytes)                            |
+ * | (CONFIG_PRIVILEGED_STACK_SIZE)          |
  * +-----------------------------------------+
  * | Guard page (4096 bytes)                 |
+ * |   - 'guard_page' in struct              |
+ * |     z_x86_thread_stack_header           |
+ * +-----------------------------------------+
+ * Low Memory addresses
+ *
+ * --- With stack being memory mapped:
+ * High memory addresses
+ * +-----------------------------------------+
+ * | Guard page (empty page)                 |
+ * +-----------------------------------------+
+ * | Thread stack (varies)                   |
+ * +-----------------------------------------+
+ * | Privilege elevation stack               |
+ * | (CONFIG_PRIVILEGED_STACK_SIZE)          |
+ * +-----------------------------------------+
+ * | Guard page (empty page)                 |
  * +-----------------------------------------+
  * Low Memory addresses
  *
+ * Without memory mapped stacks, the guard page is actually allocated
+ * as part of the stack struct, which takes up physical memory during
+ * linking.
+ *
  * Privilege elevation stacks are fixed-size. All the pages containing the
  * thread stack are marked as user-accessible. The guard page is marked
  * read-only to catch stack overflows in supervisor mode.
@@ -62,7 +87,7 @@
  * privileged mode stack.
  */
 struct z_x86_thread_stack_header {
-#ifdef CONFIG_X86_STACK_PROTECTION
+#if defined(CONFIG_X86_STACK_PROTECTION) && !defined(CONFIG_THREAD_STACK_MEM_MAPPED)
 	char guard_page[CONFIG_MMU_PAGE_SIZE];
 #endif
 #ifdef CONFIG_USERSPACE
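Not part of the patch above, but as a usage illustration: a minimal sketch of what the guard pages buy once CONFIG_HW_STACK_PROTECTION and CONFIG_THREAD_STACK_MEM_MAPPED are enabled. It uses only standard Zephyr thread APIs (K_THREAD_STACK_DEFINE, k_thread_create); the thread, stack, and function names are illustrative. A thread that recurses past its mapped stack is expected to fault in one of the guard pages and be reported by z_x86_page_fault_handler() as K_ERR_STACK_CHK_FAIL instead of silently corrupting neighbouring memory.

/* Hypothetical overflow demo (not part of this patch). */
#include <zephyr/kernel.h>

#define DEMO_STACK_SIZE 1024

K_THREAD_STACK_DEFINE(demo_stack, DEMO_STACK_SIZE);
static struct k_thread demo_thread;

static volatile int sink;

/* Recurse until the stack pointer walks off the mapped stack region. */
static int overflow(int depth)
{
	volatile int pad[64];

	pad[0] = depth;
	sink = pad[0];
	return overflow(depth + 1) + pad[0];
}

static void demo_entry(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	/* Expected to fault in a guard page and trigger K_ERR_STACK_CHK_FAIL. */
	overflow(0);
}

void start_overflow_demo(void)
{
	k_thread_create(&demo_thread, demo_stack,
			K_THREAD_STACK_SIZEOF(demo_stack),
			demo_entry, NULL, NULL, NULL,
			K_PRIO_PREEMPT(1), 0, K_NO_WAIT);
}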