
kernel: mm: rename Z_MEM_PHYS/VIRT_ADDR to K_MEM_*

This is part of a series to move memory management functions
away from the z_ namespace and into their own namespace. Also
make documentation available via doxygen.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
pull/68179/head
Daniel Leung authored 1 year ago, committed by Anas Nashif
commit db9d3134c5
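
At a glance, the rename covers four helpers, in both macro and inline-function form (every hunk below is one of these substitutions):

Z_MEM_PHYS_ADDR()  ->  K_MEM_PHYS_ADDR()
Z_MEM_VIRT_ADDR()  ->  K_MEM_VIRT_ADDR()
z_mem_phys_addr()  ->  k_mem_phys_addr()
z_mem_virt_addr()  ->  k_mem_virt_addr()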
Changed files (lines changed):
 1. arch/x86/core/fatal.c (2)
 2. arch/x86/core/ia32/crt0.S (17)
 3. arch/x86/core/ia32/fatal.c (4)
 4. arch/x86/core/ia32/userspace.S (4)
 5. arch/x86/core/intel64/locore.S (4)
 6. arch/x86/core/intel64/userspace.S (2)
 7. arch/x86/core/x86_mmu.c (16)
 8. arch/x86/include/x86_mmu.h (4)
 9. boards/qemu/x86/qemu_x86_tiny.ld (2)
10. drivers/ethernet/eth_dwmac_mmu.c (2)
11. drivers/sdhc/rcar_mmc.c (4)
12. drivers/watchdog/wdt_andes_atcwdt200.c (4)
13. include/zephyr/arch/x86/ia32/linker.ld (2)
14. include/zephyr/kernel/internal/mm.h (55)
15. kernel/Kconfig.vm (2)
16. tests/kernel/mem_protect/mem_map/src/main.c (20)

arch/x86/core/fatal.c (2)

@@ -208,7 +208,7 @@ static inline uintptr_t get_cr3(const struct arch_esf *esf)
 static inline pentry_t *get_ptables(const struct arch_esf *esf)
 {
-    return z_mem_virt_addr(get_cr3(esf));
+    return k_mem_virt_addr(get_cr3(esf));
 }
 #ifdef CONFIG_X86_64

arch/x86/core/ia32/crt0.S (17)

@@ -60,7 +60,7 @@
     * Until we enable these page tables, only physical memory addresses
     * work.
     */
-    movl $Z_MEM_PHYS_ADDR(z_x86_kernel_ptables), %eax
+    movl $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %eax
     movl %eax, %cr3
 #ifdef CONFIG_X86_PAE
@@ -126,7 +126,7 @@ SECTION_FUNC(BOOT_TEXT, __start)
     */
 #if CONFIG_SET_GDT
     /* load 32-bit operand size GDT */
-    lgdt Z_MEM_PHYS_ADDR(_gdt_rom)
+    lgdt K_MEM_PHYS_ADDR(_gdt_rom)
     /* If we set our own GDT, update the segment registers as well.
     */
@@ -138,7 +138,7 @@ SECTION_FUNC(BOOT_TEXT, __start)
     movw %ax, %fs /* Zero FS */
     movw %ax, %gs /* Zero GS */
-    ljmp $CODE_SEG, $Z_MEM_PHYS_ADDR(__csSet) /* set CS = 0x08 */
+    ljmp $CODE_SEG, $K_MEM_PHYS_ADDR(__csSet) /* set CS = 0x08 */
 __csSet:
 #endif /* CONFIG_SET_GDT */
@@ -180,7 +180,8 @@ __csSet:
     andl $~0x400, %eax /* CR4[OSXMMEXCPT] = 0 */
     movl %eax, %cr4 /* move EAX to CR4 */
-    ldmxcsr Z_MEM_PHYS_ADDR(_sse_mxcsr_default_value) /* initialize SSE control/status reg */
+    /* initialize SSE control/status reg */
+    ldmxcsr K_MEM_PHYS_ADDR(_sse_mxcsr_default_value)
 #endif /* CONFIG_X86_SSE */
@@ -199,7 +200,7 @@ __csSet:
     */
 #ifdef CONFIG_INIT_STACKS
     movl $0xAAAAAAAA, %eax
-    leal Z_MEM_PHYS_ADDR(z_interrupt_stacks), %edi
+    leal K_MEM_PHYS_ADDR(z_interrupt_stacks), %edi
 #ifdef CONFIG_X86_STACK_PROTECTION
     addl $4096, %edi
 #endif
@@ -208,7 +209,7 @@ __csSet:
     rep stosl
 #endif
-    movl $Z_MEM_PHYS_ADDR(z_interrupt_stacks), %esp
+    movl $K_MEM_PHYS_ADDR(z_interrupt_stacks), %esp
 #ifdef CONFIG_X86_STACK_PROTECTION
     /* In this configuration, all stacks, including IRQ stack, are declared
     * with a 4K non-present guard page preceding the stack buffer
@@ -347,9 +348,9 @@ _gdt:
     * descriptor here */
     /* Limit on GDT */
-    .word Z_MEM_PHYS_ADDR(_gdt_rom_end) - Z_MEM_PHYS_ADDR(_gdt_rom) - 1
+    .word K_MEM_PHYS_ADDR(_gdt_rom_end) - K_MEM_PHYS_ADDR(_gdt_rom) - 1
     /* table address: _gdt_rom */
-    .long Z_MEM_PHYS_ADDR(_gdt_rom)
+    .long K_MEM_PHYS_ADDR(_gdt_rom)
     .word 0x0000
     /* Entry 1 (selector=0x0008): Code descriptor: DPL0 */

arch/x86/core/ia32/fatal.c (4)

@@ -156,7 +156,7 @@ struct task_state_segment _df_tss = {
     .ss = DATA_SEG,
     .eip = (uint32_t)df_handler_top,
     .cr3 = (uint32_t)
-        Z_MEM_PHYS_ADDR(POINTER_TO_UINT(&z_x86_kernel_ptables[0]))
+        K_MEM_PHYS_ADDR(POINTER_TO_UINT(&z_x86_kernel_ptables[0]))
 };
 __pinned_func
@@ -213,7 +213,7 @@ static FUNC_NORETURN __used void df_handler_top(void)
     _main_tss.es = DATA_SEG;
     _main_tss.ss = DATA_SEG;
     _main_tss.eip = (uint32_t)df_handler_bottom;
-    _main_tss.cr3 = z_mem_phys_addr(z_x86_kernel_ptables);
+    _main_tss.cr3 = k_mem_phys_addr(z_x86_kernel_ptables);
     _main_tss.eflags = 0U;
     /* NT bit is set in EFLAGS so we will task switch back to _main_tss

arch/x86/core/ia32/userspace.S (4)

@@ -51,7 +51,7 @@ SECTION_FUNC(PINNED_TEXT, z_x86_trampoline_to_kernel)
     pushl %edi
     /* Switch to kernel page table */
-    movl $Z_MEM_PHYS_ADDR(z_x86_kernel_ptables), %esi
+    movl $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %esi
     movl %esi, %cr3
     /* Save old trampoline stack pointer in %edi */
@@ -156,7 +156,7 @@ SECTION_FUNC(TEXT, z_x86_syscall_entry_stub)
     pushl %edi
     /* Switch to kernel page table */
-    movl $Z_MEM_PHYS_ADDR(z_x86_kernel_ptables), %esi
+    movl $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %esi
     movl %esi, %cr3
     /* Save old trampoline stack pointer in %edi */

arch/x86/core/intel64/locore.S (4)

@@ -44,7 +44,7 @@
     /* Page tables created at build time by gen_mmu.py
     * NOTE: Presumes phys=virt
     */
-    movl $Z_MEM_PHYS_ADDR(z_x86_kernel_ptables), %eax
+    movl $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %eax
     movl %eax, %cr3
     set_efer
@@ -66,7 +66,7 @@
     clts
     /* NOTE: Presumes phys=virt */
-    movq $Z_MEM_PHYS_ADDR(z_x86_kernel_ptables), %rax
+    movq $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %rax
     movq %rax, %cr3
     set_efer

arch/x86/core/intel64/userspace.S (2)

@@ -87,7 +87,7 @@ z_x86_syscall_entry_stub:
     pushq %rax
     /* NOTE: Presumes phys=virt */
-    movq $Z_MEM_PHYS_ADDR(z_x86_kernel_ptables), %rax
+    movq $K_MEM_PHYS_ADDR(z_x86_kernel_ptables), %rax
     movq %rax, %cr3
     popq %rax
     movq $0, -8(%rsp) /* Delete stashed RAX data */

arch/x86/core/x86_mmu.c (16)

@@ -313,7 +313,7 @@ static inline uintptr_t get_entry_phys(pentry_t entry, int level)
 __pinned_func
 static inline pentry_t *next_table(pentry_t entry, int level)
 {
-    return z_mem_virt_addr(get_entry_phys(entry, level));
+    return k_mem_virt_addr(get_entry_phys(entry, level));
 }
 /* Number of table entries at this level */
@@ -416,12 +416,12 @@ void z_x86_tlb_ipi(const void *arg)
     * if KPTI is turned on
     */
    ptables_phys = z_x86_cr3_get();
-    __ASSERT(ptables_phys == z_mem_phys_addr(&z_x86_kernel_ptables), "");
+    __ASSERT(ptables_phys == k_mem_phys_addr(&z_x86_kernel_ptables), "");
 #else
     /* We might have been moved to another memory domain, so always invoke
     * z_x86_thread_page_tables_get() instead of using current CR3 value.
     */
-    ptables_phys = z_mem_phys_addr(z_x86_thread_page_tables_get(_current));
+    ptables_phys = k_mem_phys_addr(z_x86_thread_page_tables_get(_current));
 #endif
     /*
     * In the future, we can consider making this smarter, such as
@@ -661,7 +661,7 @@ static void dump_ptables(pentry_t *table, uint8_t *base, int level)
 #endif
     printk("%s at %p (0x%" PRIxPTR "): ", info->name, table,
-           z_mem_phys_addr(table));
+           k_mem_phys_addr(table));
     if (level == 0) {
         printk("entire address space\n");
     } else {
@@ -826,7 +826,7 @@ static inline pentry_t pte_finalize_value(pentry_t val, bool user_table,
 {
 #ifdef CONFIG_X86_KPTI
     static const uintptr_t shared_phys_addr =
-        Z_MEM_PHYS_ADDR(POINTER_TO_UINT(&z_shared_kernel_page_start));
+        K_MEM_PHYS_ADDR(POINTER_TO_UINT(&z_shared_kernel_page_start));
     if (user_table && (val & MMU_US) == 0 && (val & MMU_P) != 0 &&
         get_entry_phys(val, level) != shared_phys_addr) {
@@ -1720,7 +1720,7 @@ static int copy_page_table(pentry_t *dst, pentry_t *src, int level)
     * cast needed for PAE case where sizeof(void *) and
     * sizeof(pentry_t) are not the same.
     */
-    dst[i] = ((pentry_t)z_mem_phys_addr(child_dst) |
+    dst[i] = ((pentry_t)k_mem_phys_addr(child_dst) |
              INT_FLAGS);
     ret = copy_page_table(child_dst,
@@ -1924,11 +1924,11 @@ int arch_mem_domain_thread_add(struct k_thread *thread)
     * z_x86_current_stack_perms()
     */
    if (is_migration) {
-        old_ptables = z_mem_virt_addr(thread->arch.ptables);
+        old_ptables = k_mem_virt_addr(thread->arch.ptables);
         set_stack_perms(thread, domain->arch.ptables);
     }
-    thread->arch.ptables = z_mem_phys_addr(domain->arch.ptables);
+    thread->arch.ptables = k_mem_phys_addr(domain->arch.ptables);
     LOG_DBG("set thread %p page tables to 0x%" PRIxPTR, thread,
         thread->arch.ptables);

arch/x86/include/x86_mmu.h (4)

@@ -182,7 +182,7 @@ static inline uintptr_t z_x86_cr3_get(void)
 /* Return the virtual address of the page tables installed in this CPU in CR3 */
 static inline pentry_t *z_x86_page_tables_get(void)
 {
-    return z_mem_virt_addr(z_x86_cr3_get());
+    return k_mem_virt_addr(z_x86_cr3_get());
 }
 /* Return cr2 value, which contains the page fault linear address.
@@ -215,7 +215,7 @@ static inline pentry_t *z_x86_thread_page_tables_get(struct k_thread *thread)
     * the kernel's page tables and not the page tables associated
     * with their memory domain.
     */
-    return z_mem_virt_addr(thread->arch.ptables);
+    return k_mem_virt_addr(thread->arch.ptables);
 }
 #else
     ARG_UNUSED(thread);

boards/qemu/x86/qemu_x86_tiny.ld (2)

@@ -245,7 +245,7 @@ MEMORY
     *mpsc_pbuf.c.obj(.##lsect) \
     *mpsc_pbuf.c.obj(.##lsect.*)
-epoint = Z_MEM_PHYS_ADDR(CONFIG_KERNEL_ENTRY);
+epoint = K_MEM_PHYS_ADDR(CONFIG_KERNEL_ENTRY);
 ENTRY(epoint)
 /* SECTIONS definitions */

drivers/ethernet/eth_dwmac_mmu.c (2)

@@ -47,7 +47,7 @@ void dwmac_platform_init(struct dwmac_priv *p)
     sys_cache_data_invd_range(dwmac_tx_rx_descriptors,
                               sizeof(dwmac_tx_rx_descriptors));
-    desc_phys_addr = z_mem_phys_addr(dwmac_tx_rx_descriptors);
+    desc_phys_addr = k_mem_phys_addr(dwmac_tx_rx_descriptors);
     /* remap descriptor rings uncached */
     k_mem_map_phys_bare(&desc_uncached_addr, desc_phys_addr,

drivers/sdhc/rcar_mmc.c (4)

@@ -566,7 +566,7 @@ static int rcar_mmc_dma_rx_tx_data(const struct device *dev, struct sdhc_data *d
     reg |= RCAR_MMC_EXTMODE_DMA_EN;
     rcar_mmc_write_reg32(dev, RCAR_MMC_EXTMODE, reg);
-    dma_addr = z_mem_phys_addr(data->data);
+    dma_addr = k_mem_phys_addr(data->data);
     rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_ADDR_L, dma_addr);
     rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_ADDR_H, 0);
@@ -830,7 +830,7 @@ static int rcar_mmc_rx_tx_data(const struct device *dev, struct sdhc_data *data,
     int ret = 0;
 #ifdef CONFIG_RCAR_MMC_DMA_SUPPORT
-    if (!(z_mem_phys_addr(data->data) >> 32)) {
+    if (!(k_mem_phys_addr(data->data) >> 32)) {
         ret = rcar_mmc_dma_rx_tx_data(dev, data, is_read);
     } else
 #endif

drivers/watchdog/wdt_andes_atcwdt200.c (4)

@@ -332,14 +332,14 @@ static int wdt_atcwdt200_init(const struct device *dev)
     ret = syscon_write_reg(syscon_dev, SMU_RESET_REGLO,
                            ((uint32_t)((unsigned long)
-                           Z_MEM_PHYS_ADDR(CONFIG_KERNEL_ENTRY))));
+                           K_MEM_PHYS_ADDR(CONFIG_KERNEL_ENTRY))));
     if (ret < 0) {
         return -EINVAL;
     }
     ret = syscon_write_reg(syscon_dev, SMU_RESET_REGHI,
                            ((uint32_t)((uint64_t)((unsigned long)
-                           Z_MEM_PHYS_ADDR(CONFIG_KERNEL_ENTRY)) >> 32)));
+                           K_MEM_PHYS_ADDR(CONFIG_KERNEL_ENTRY)) >> 32)));
     if (ret < 0) {
         return -EINVAL;
     }

include/zephyr/arch/x86/ia32/linker.ld (2)

@@ -68,7 +68,7 @@
 #define MMU_PAGE_ALIGN_PERM
 #endif
-epoint = Z_MEM_PHYS_ADDR(CONFIG_KERNEL_ENTRY);
+epoint = K_MEM_PHYS_ADDR(CONFIG_KERNEL_ENTRY);
 ENTRY(epoint)
 /* SECTIONS definitions */

include/zephyr/kernel/internal/mm.h (55)

@@ -44,8 +44,27 @@
 #define K_MEM_VIRT_OFFSET 0
 #endif /* CONFIG_MMU */
-#define Z_MEM_PHYS_ADDR(virt) ((virt) - K_MEM_VIRT_OFFSET)
-#define Z_MEM_VIRT_ADDR(phys) ((phys) + K_MEM_VIRT_OFFSET)
+/**
+ * @brief Get physical address from virtual address.
+ *
+ * This only works in the kernel's permanent mapping of RAM.
+ *
+ * @param virt Virtual address
+ *
+ * @return Physical address.
+ */
+#define K_MEM_PHYS_ADDR(virt) ((virt) - K_MEM_VIRT_OFFSET)
+/**
+ * @brief Get virtual address from physical address.
+ *
+ * This only works in the kernel's permanent mapping of RAM.
+ *
+ * @param phys Physical address
+ *
+ * @return Virtual address.
+ */
+#define K_MEM_VIRT_ADDR(phys) ((phys) + K_MEM_VIRT_OFFSET)
 #if K_MEM_VIRT_OFFSET != 0
 #define Z_VM_KERNEL 1
@@ -61,8 +80,18 @@
 #include <zephyr/sys/__assert.h>
 #include <zephyr/sys/mem_manage.h>
-/* Just like Z_MEM_PHYS_ADDR() but with type safety and assertions */
-static inline uintptr_t z_mem_phys_addr(void *virt)
+/**
+ * @brief Get physical address from virtual address.
+ *
+ * This only works in the kernel's permanent mapping of RAM.
+ *
+ * Just like K_MEM_PHYS_ADDR() but with type safety and assertions.
+ *
+ * @param virt Virtual address
+ *
+ * @return Physical address.
+ */
+static inline uintptr_t k_mem_phys_addr(void *virt)
 {
     uintptr_t addr = (uintptr_t)virt;
@@ -101,11 +130,21 @@ static inline uintptr_t z_mem_phys_addr(void *virt)
     * the above checks won't be sufficient with demand paging
     */
-    return Z_MEM_PHYS_ADDR(addr);
+    return K_MEM_PHYS_ADDR(addr);
 }
-/* Just like Z_MEM_VIRT_ADDR() but with type safety and assertions */
-static inline void *z_mem_virt_addr(uintptr_t phys)
+/**
+ * @brief Get virtual address from physical address.
+ *
+ * This only works in the kernel's permanent mapping of RAM.
+ *
+ * Just like K_MEM_VIRT_ADDR() but with type safety and assertions.
+ *
+ * @param phys Physical address
+ *
+ * @return Virtual address.
+ */
+static inline void *k_mem_virt_addr(uintptr_t phys)
 {
 #if defined(CONFIG_KERNEL_VM_USE_CUSTOM_MEM_RANGE_CHECK)
     __ASSERT(sys_mm_is_phys_addr_in_range(phys),
@@ -128,7 +167,7 @@ static inline void *z_mem_virt_addr(uintptr_t phys)
     * the above check won't be sufficient with demand paging
     */
-    return (void *)Z_MEM_VIRT_ADDR(phys);
+    return (void *)K_MEM_VIRT_ADDR(phys);
 }
 #ifdef __cplusplus

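As a usage note: for any address inside the kernel's permanent mapping of RAM, the two renamed inline helpers are inverses of each other. A minimal sketch of a caller updated for the new names (the buffer, function name, and assertion here are illustrative, not from the commit):

#include <stdint.h>
#include <zephyr/kernel/internal/mm.h>
#include <zephyr/sys/__assert.h>

static uint8_t buf[64]; /* lives in the kernel's permanent RAM mapping */

static void check_roundtrip(void)
{
    uintptr_t phys = k_mem_phys_addr(buf); /* was z_mem_phys_addr() */
    void *virt = k_mem_virt_addr(phys);    /* was z_mem_virt_addr() */

    __ASSERT(virt == (void *)buf, "virt/phys round-trip mismatch");
}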
kernel/Kconfig.vm (2)

@@ -202,7 +202,7 @@ config KERNEL_VM_USE_CUSTOM_MEM_RANGE_CHECK
     bool
     help
       Use custom memory range check functions instead of the generic
-      checks in z_mem_phys_addr() and z_mem_virt_addr().
+      checks in k_mem_phys_addr() and k_mem_virt_addr().
       sys_mm_is_phys_addr_in_range() and
       sys_mm_is_virt_addr_in_range() must be implemented.
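
For platforms that select this option, a minimal sketch of the two required checks follows, assuming the prototypes from <zephyr/sys/mem_manage.h> take a uintptr_t physical address and a void * virtual address respectively (the MY_RAM_* bounds are hypothetical platform values, not from this commit):

#include <stdbool.h>
#include <stdint.h>

#define MY_RAM_PHYS_START 0x40000000UL         /* hypothetical */
#define MY_RAM_VIRT_START 0xC0000000UL         /* hypothetical */
#define MY_RAM_SIZE       (64UL * 1024 * 1024) /* hypothetical */

bool sys_mm_is_phys_addr_in_range(uintptr_t phys)
{
    return (phys >= MY_RAM_PHYS_START) &&
           (phys < MY_RAM_PHYS_START + MY_RAM_SIZE);
}

bool sys_mm_is_virt_addr_in_range(void *virt)
{
    uintptr_t addr = (uintptr_t)virt;

    return (addr >= MY_RAM_VIRT_START) &&
           (addr < MY_RAM_VIRT_START + MY_RAM_SIZE);
}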

tests/kernel/mem_protect/mem_map/src/main.c (20)

@@ -60,11 +60,11 @@ ZTEST(mem_map, test_k_mem_map_phys_bare_rw)
     expect_fault = false;
     /* Map in a page that allows writes */
-    k_mem_map_phys_bare(&mapped_rw, z_mem_phys_addr(buf),
+    k_mem_map_phys_bare(&mapped_rw, k_mem_phys_addr(buf),
                         BUF_SIZE, BASE_FLAGS | K_MEM_PERM_RW);
     /* Map again this time only allowing reads */
-    k_mem_map_phys_bare(&mapped_ro, z_mem_phys_addr(buf),
+    k_mem_map_phys_bare(&mapped_ro, k_mem_phys_addr(buf),
                         BUF_SIZE, BASE_FLAGS);
     /* Initialize read-write buf with some bytes */
@@ -138,7 +138,7 @@ ZTEST(mem_map, test_k_mem_map_phys_bare_exec)
     func = transplanted_function;
     /* Now map with execution enabled and try to run the copied fn */
-    k_mem_map_phys_bare(&mapped_exec, z_mem_phys_addr(__test_mem_map_start),
+    k_mem_map_phys_bare(&mapped_exec, k_mem_phys_addr(__test_mem_map_start),
                         (uintptr_t)(__test_mem_map_end - __test_mem_map_start),
                         BASE_FLAGS | K_MEM_PERM_EXEC);
@@ -147,7 +147,7 @@ ZTEST(mem_map, test_k_mem_map_phys_bare_exec)
     zassert_true(executed, "function did not execute");
     /* Now map without execution and execution should now fail */
-    k_mem_map_phys_bare(&mapped_ro, z_mem_phys_addr(__test_mem_map_start),
+    k_mem_map_phys_bare(&mapped_ro, k_mem_phys_addr(__test_mem_map_start),
                         (uintptr_t)(__test_mem_map_end - __test_mem_map_start),
                         BASE_FLAGS);
@@ -177,7 +177,7 @@ ZTEST(mem_map, test_k_mem_map_phys_bare_side_effect)
     * Show that by mapping test_page to an RO region, we can still
     * modify test_page.
     */
-    k_mem_map_phys_bare(&mapped, z_mem_phys_addr(test_page),
+    k_mem_map_phys_bare(&mapped, k_mem_phys_addr(test_page),
                         sizeof(test_page), BASE_FLAGS);
     /* Should NOT fault */
@@ -203,7 +203,7 @@ ZTEST(mem_map, test_k_mem_unmap_phys_bare)
     expect_fault = false;
     /* Map in a page that allows writes */
-    k_mem_map_phys_bare(&mapped, z_mem_phys_addr(test_page),
+    k_mem_map_phys_bare(&mapped, k_mem_phys_addr(test_page),
                         sizeof(test_page), BASE_FLAGS | K_MEM_PERM_RW);
     /* Should NOT fault */
@@ -230,7 +230,7 @@ ZTEST(mem_map, test_k_mem_map_phys_bare_unmap_reclaim_addr)
     uint8_t *buf = test_page + BUF_OFFSET;
     /* Map the buffer the first time. */
-    k_mem_map_phys_bare(&mapped, z_mem_phys_addr(buf),
+    k_mem_map_phys_bare(&mapped, k_mem_phys_addr(buf),
                         BUF_SIZE, BASE_FLAGS);
     printk("Mapped (1st time): %p\n", mapped);
@@ -251,7 +251,7 @@ ZTEST(mem_map, test_k_mem_map_phys_bare_unmap_reclaim_addr)
     * It should give us back the same virtual address
     * as above when it is mapped the first time.
     */
-    k_mem_map_phys_bare(&mapped, z_mem_phys_addr(buf), BUF_SIZE, BASE_FLAGS);
+    k_mem_map_phys_bare(&mapped, k_mem_phys_addr(buf), BUF_SIZE, BASE_FLAGS);
     printk("Mapped (2nd time): %p\n", mapped);
@@ -508,7 +508,7 @@ ZTEST(mem_map_api, test_k_mem_map_user)
     */
    expect_fault = false;
-    k_mem_map_phys_bare(&mapped, z_mem_phys_addr(test_page), sizeof(test_page),
+    k_mem_map_phys_bare(&mapped, k_mem_phys_addr(test_page), sizeof(test_page),
                         BASE_FLAGS | K_MEM_PERM_RW | K_MEM_PERM_USER);
     printk("mapped a page: %p - %p (with K_MEM_PERM_USER)\n", mapped,
@@ -529,7 +529,7 @@ ZTEST(mem_map_api, test_k_mem_map_user)
     */
    expect_fault = true;
-    k_mem_map_phys_bare(&mapped, z_mem_phys_addr(test_page), sizeof(test_page),
+    k_mem_map_phys_bare(&mapped, k_mem_phys_addr(test_page), sizeof(test_page),
                         BASE_FLAGS | K_MEM_PERM_RW);
     printk("mapped a page: %p - %p (without K_MEM_PERM_USER)\n", mapped,