
aarch64: Fix register naming in cpu.h

The naming of registers and bit-fields in the cpu.h file is incoherent and
messy. Refactor the whole file, using proper suffixes for bits, shifts and
masks.

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
Branch: pull/32737/head
Carlo Caione authored 4 years ago, committed by Anas Nashif
commit a2226f5200
Changed files (lines changed):
  1. arch/arm/core/aarch64/cpu_idle.S (8)
  2. arch/arm/core/aarch64/fatal.c (8)
  3. arch/arm/core/aarch64/isr_wrapper.S (4)
  4. arch/arm/core/aarch64/mmu/arm_mmu.c (4)
  5. arch/arm/core/aarch64/reset.S (23)
  6. arch/arm/core/aarch64/thread.c (2)
  7. drivers/interrupt_controller/intc_gicv3.c (8)
  8. include/arch/arm/aarch64/asm_inline_gcc.h (4)
  9. include/arch/arm/aarch64/cpu.h (217)
  10. include/arch/arm/aarch64/timer.h (11)
  11. soc/arm/bcm_vk/viper/plat_core.S (6)
  12. soc/arm/qemu_cortex_a53/plat_core.S (4)
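For reference, the convention the refactor settles on (inferred from the new definitions in the diffs below; the FOO register here is purely hypothetical): single flags end in _BIT, field offsets in _SHIFT, field widths in _MASK, and accessors get a GET_ prefix.

/* Hypothetical register FOO, shown only to illustrate the suffix convention */
#define FOO_EN_BIT      BIT(0)   /* single flag  -> _BIT   */
#define FOO_MODE_SHIFT  (4)      /* field offset -> _SHIFT */
#define FOO_MODE_MASK   (0x3)    /* field width  -> _MASK  */
#define GET_FOO_MODE(v) (((v) >> FOO_MODE_SHIFT) & FOO_MODE_MASK)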

arch/arm/core/aarch64/cpu_idle.S (8)

@@ -23,7 +23,7 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
#endif
dsb sy
wfi
-msr daifclr, #(DAIFSET_IRQ)
+msr daifclr, #(DAIFCLR_IRQ_BIT)
ret
GTEXT(arch_cpu_atomic_idle)
@@ -33,11 +33,11 @@ SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
bl sys_trace_idle
ldp x0, x30, [sp], #16
#endif
-msr daifset, #(DAIFSET_IRQ)
+msr daifset, #(DAIFSET_IRQ_BIT)
isb
wfe
-tst x0, #(DAIF_IRQ)
+tst x0, #(DAIF_IRQ_BIT)
beq _irq_disabled
-msr daifclr, #(DAIFSET_IRQ)
+msr daifclr, #(DAIFCLR_IRQ_BIT)
_irq_disabled:
ret
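Background for the rename above: daifset and daifclr take a 4-bit immediate (F=0, I=1, A=2, D=3) that masks or unmasks the corresponding exception, while reading DAIF back reports the same flags at bits 6-9. Using DAIFSET_IRQ as the operand of msr daifclr only worked because the two immediates happen to share a value; the DAIFSET_*/DAIFCLR_* split makes the intent explicit. A minimal sketch of the intended pairing (illustration only, not part of the patch):

/* Unmask IRQs around a callback, then mask them again */
static inline void run_with_irqs_unmasked(void (*fn)(void))
{
	__asm__ volatile("msr daifclr, %0" :: "i" (DAIFCLR_IRQ_BIT) : "memory");
	fn();
	__asm__ volatile("msr daifset, %0" :: "i" (DAIFSET_IRQ_BIT) : "memory");
}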

arch/arm/core/aarch64/fatal.c (8)

@@ -23,7 +23,7 @@ static void dump_esr(uint64_t esr, bool *dump_far)
{
const char *err;
-switch (ESR_EC(esr)) {
+switch (GET_ESR_EC(esr)) {
case 0b000000: /* 0x00 */
err = "Unknown reason";
break;
@@ -142,9 +142,9 @@ static void dump_esr(uint64_t esr, bool *dump_far)
}
LOG_ERR("ESR_ELn: 0x%016llx", esr);
LOG_ERR(" EC: 0x%llx (%s)", ESR_EC(esr), err);
LOG_ERR(" IL: 0x%llx", ESR_IL(esr));
LOG_ERR(" ISS: 0x%llx", ESR_ISS(esr));
LOG_ERR(" EC: 0x%llx (%s)", GET_ESR_EC(esr), err);
LOG_ERR(" IL: 0x%llx", GET_ESR_IL(esr));
LOG_ERR(" ISS: 0x%llx", GET_ESR_ISS(esr));
}
static void esf_dump(const z_arch_esf_t *esf)
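The GET_ESR_* helpers are plain shift-and-mask accessors over the new ESR_*_SHIFT/ESR_*_MASK pairs. A worked example (illustration only; the sample ESR value is not from the patch):

/* 0x96000045 is a typical data abort ESR: EC=0x25, IL=1, ISS=0x45 */
static void esr_decode_example(uint64_t esr) /* e.g. esr = 0x96000045ULL */
{
	uint64_t ec  = GET_ESR_EC(esr);  /* (esr >> 26) & BIT_MASK(6) -> 0x25 */
	uint64_t il  = GET_ESR_IL(esr);  /* (esr >> 25) & BIT_MASK(1) -> 0x1  */
	uint64_t iss = GET_ESR_ISS(esr); /* esr & BIT_MASK(25)        -> 0x45 */

	LOG_ERR("EC 0x%llx IL 0x%llx ISS 0x%llx", ec, il, iss);
}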

arch/arm/core/aarch64/isr_wrapper.S (4)

@@ -61,9 +61,9 @@ SECTION_FUNC(TEXT, _isr_wrapper)
* Call the ISR. Unmask and mask again the IRQs to support nested
* exception handlers
*/
-msr daifclr, #(DAIFSET_IRQ)
+msr daifclr, #(DAIFCLR_IRQ_BIT)
blr x3
-msr daifset, #(DAIFSET_IRQ)
+msr daifset, #(DAIFSET_IRQ_BIT)
/* Signal end-of-interrupt */
ldp x0, x1, [sp], #16

arch/arm/core/aarch64/mmu/arm_mmu.c (4)

@@ -511,7 +511,7 @@ static void enable_mmu_el1(struct arm_mmu_ptables *ptables, unsigned int flags)
__asm__ volatile("mrs %0, sctlr_el1" : "=r" (val));
__asm__ volatile("msr sctlr_el1, %0"
:
: "r" (val | SCTLR_M | SCTLR_C)
: "r" (val | SCTLR_M_BIT | SCTLR_C_BIT)
: "memory", "cc");
/* Ensure the MMU enable takes effect immediately */
@@ -546,7 +546,7 @@ void z_arm64_mmu_init(void)
/* Ensure that MMU is already not enabled */
__asm__ volatile("mrs %0, sctlr_el1" : "=r" (val));
__ASSERT((val & SCTLR_M) == 0, "MMU is already enabled\n");
__ASSERT((val & SCTLR_M_BIT) == 0, "MMU is already enabled\n");
kernel_ptables.base_xlat_table = new_table();
setup_page_tables(&kernel_ptables);

arch/arm/core/aarch64/reset.S (23)

@@ -77,10 +77,10 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
* Disable access traps to EL3 for CPACR, Trace, FP, ASIMD,
* SVE from lower EL.
*/
-mov_imm x0, CPTR_EL3_RES_VAL
-mov_imm x1, (CPTR_EL3_TTA | CPTR_EL3_TFP | CPTR_EL3_TCPAC)
+mov_imm x0, CPTR_EL3_RES0
+mov_imm x1, (CPTR_EL3_TTA_BIT | CPTR_EL3_TFP_BIT | CPTR_EL3_TCPAC_BIT)
bic x0, x0, x1
-orr x0, x0, #(CPTR_EL3_EZ)
+orr x0, x0, #(CPTR_EL3_EZ_BIT)
msr cptr_el3, x0
isb
@@ -89,9 +89,9 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
/* Enable access control configuration from lower EL */
mrs x0, actlr_el3
-orr x0, x0, #(ACTLR_EL3_L2ACTLR | ACTLR_EL3_L2ECTLR \
-| ACTLR_EL3_L2CTLR)
-orr x0, x0, #(ACTLR_EL3_CPUACTLR | ACTLR_EL3_CPUECTLR)
+orr x0, x0, #(ACTLR_EL3_L2ACTLR_BIT | ACTLR_EL3_L2ECTLR_BIT \
+| ACTLR_EL3_L2CTLR_BIT)
+orr x0, x0, #(ACTLR_EL3_CPUACTLR_BIT | ACTLR_EL3_CPUECTLR_BIT)
msr actlr_el3, x0
/* Initialize SCTLR_EL1 to reset value */
@@ -100,14 +100,11 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
/* Disable EA/IRQ/FIQ routing to EL3 and set EL1 to AArch64 */
mov x0, xzr
-orr x0, x0, #(SCR_EL3_RW)
+orr x0, x0, #(SCR_RW_BIT)
msr scr_el3, x0
/* On eret return to secure EL1h with DAIF masked */
-mov x0, xzr
-orr x0, x0, #(DAIF_MASK)
-orr x0, x0, #(SPSR_EL3_TO_EL1)
-orr x0, x0, #(SPSR_EL3_h)
+mov_imm x0, (SPSR_DAIF_MASK | SPSR_MODE_EL1H)
msr spsr_el3, x0
adr x0, 1f
@@ -135,7 +132,7 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
msr cpacr_el1, x0
/* Enable the instruction cache and EL1 stack alignment check. */
-mov_imm x1, (SCTLR_I | SCTLR_SA)
+mov_imm x1, (SCTLR_I_BIT | SCTLR_SA_BIT)
mrs x0, sctlr_el1
orr x0, x0, x1
msr sctlr_el1, x0
@@ -144,6 +141,6 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
isb
/* Enable the SError interrupt */
-msr daifclr, #(DAIFSET_ABT)
+msr daifclr, #(DAIFCLR_ABT_BIT)
bl z_arm64_prep_c
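The three orr instructions collapse into a single mov_imm because both spellings encode the same SPSR value; a quick check with the cpu.h definitions (illustration only, not part of the patch):

/* old: DAIF_MASK | SPSR_EL3_TO_EL1 | SPSR_EL3_h = (0xf << 6) | (0x2 << 1) | BIT(0) = 0x3c5
 * new: SPSR_DAIF_MASK | SPSR_MODE_EL1H          = (0xf << 6) | 0x5                 = 0x3c5
 */
BUILD_ASSERT((SPSR_DAIF_MASK | SPSR_MODE_EL1H) == 0x3c5);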

arch/arm/core/aarch64/thread.c (2)

@@ -41,7 +41,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
* - SPSR_ELn: to enable IRQs (we are masking FIQs).
*/
pInitCtx->elr = (uint64_t)z_thread_entry;
-pInitCtx->spsr = SPSR_MODE_EL1T | DAIF_FIQ;
+pInitCtx->spsr = SPSR_MODE_EL1T | DAIF_FIQ_BIT;
/*
* We are saving SP to pop out entry and parameters when going through

drivers/interrupt_controller/intc_gicv3.c (8)

@@ -211,13 +211,13 @@ static void gicv3_cpuif_init(void)
*/
icc_sre = read_sysreg(ICC_SRE_EL1);
-if (!(icc_sre & ICC_SRE_ELx_SRE)) {
-icc_sre = (icc_sre | ICC_SRE_ELx_SRE |
-ICC_SRE_ELx_DIB | ICC_SRE_ELx_DFB);
+if (!(icc_sre & ICC_SRE_ELx_SRE_BIT)) {
+icc_sre = (icc_sre | ICC_SRE_ELx_SRE_BIT |
+ICC_SRE_ELx_DIB_BIT | ICC_SRE_ELx_DFB_BIT);
write_sysreg(icc_sre, ICC_SRE_EL1);
icc_sre = read_sysreg(ICC_SRE_EL1);
-__ASSERT_NO_MSG(icc_sre & ICC_SRE_ELx_SRE);
+__ASSERT_NO_MSG(icc_sre & ICC_SRE_ELx_SRE_BIT);
}
write_sysreg(GIC_IDLE_PRIO, ICC_PMR_EL1);

include/arch/arm/aarch64/asm_inline_gcc.h (4)

@@ -49,7 +49,7 @@ static ALWAYS_INLINE unsigned int arch_irq_lock(void)
__asm__ volatile("mrs %0, daif;"
"msr daifset, %1;"
: "=r" (key)
: "i" (DAIFSET_IRQ)
: "i" (DAIFSET_IRQ_BIT)
: "memory");
return key;
@@ -66,7 +66,7 @@ static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
/* We only check the (I)RQ bit on the DAIF register */
-return (key & DAIF_IRQ) == 0;
+return (key & DAIF_IRQ_BIT) == 0;
}
#ifdef __cplusplus
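Usage stays the same: the key returned by arch_irq_lock() is the previous DAIF value, so DAIF_IRQ_BIT (bit 7 of the readable register) is what arch_irq_unlocked() tests. A minimal usage sketch (not part of the patch):

static void irq_lock_example(void)
{
	unsigned int key = arch_irq_lock();   /* key = DAIF before masking IRQs */

	/* ... critical section ... */

	if (arch_irq_unlocked(key)) {
		/* IRQs were unmasked on entry: (key & DAIF_IRQ_BIT) == 0 */
	}

	arch_irq_unlock(key);
}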

include/arch/arm/aarch64/cpu.h (217)

@@ -9,64 +9,129 @@
#include <sys/util.h>
-#define DAIFSET_FIQ BIT(0)
-#define DAIFSET_IRQ BIT(1)
-#define DAIFSET_ABT BIT(2)
-#define DAIFSET_DBG BIT(3)
+#define DAIFSET_FIQ_BIT BIT(0)
+#define DAIFSET_IRQ_BIT BIT(1)
+#define DAIFSET_ABT_BIT BIT(2)
+#define DAIFSET_DBG_BIT BIT(3)
-#define DAIF_FIQ BIT(6)
-#define DAIF_IRQ BIT(7)
-#define DAIF_ABT BIT(8)
-#define DAIF_DBG BIT(9)
-#define DAIF_MASK (0xf << 6)
+#define DAIFCLR_FIQ_BIT BIT(0)
+#define DAIFCLR_IRQ_BIT BIT(1)
+#define DAIFCLR_ABT_BIT BIT(2)
+#define DAIFCLR_DBG_BIT BIT(3)
+#define DAIF_FIQ_BIT BIT(6)
+#define DAIF_IRQ_BIT BIT(7)
+#define DAIF_ABT_BIT BIT(8)
+#define DAIF_DBG_BIT BIT(9)
+#define SPSR_DAIF_SHIFT (6)
+#define SPSR_DAIF_MASK (0xf << SPSR_DAIF_SHIFT)
#define SPSR_MODE_EL0T (0x0)
#define SPSR_MODE_EL1T (0x4)
#define SPSR_MODE_EL1H (0x5)
#define SPSR_MODE_EL2T (0x8)
#define SPSR_MODE_EL2H (0x9)
#define SCTLR_EL3_RES1 (BIT(29) | BIT(28) | BIT(23) | \
BIT(22) | BIT(18) | BIT(16) | \
BIT(11) | BIT(5) | BIT(4))
#define SCTLR_EL2_RES1 (BIT(29) | BIT(28) | BIT(23) | \
BIT(22) | BIT(18) | BIT(16) | \
BIT(11) | BIT(5) | BIT(4))
#define SCTLR_EL1_RES1 (BIT(29) | BIT(28) | BIT(23) | \
BIT(22) | BIT(20) | BIT(11))
-#define SCTLR_M BIT(0)
-#define SCTLR_A BIT(1)
-#define SCTLR_C BIT(2)
-#define SCTLR_SA BIT(3)
-#define SCTLR_I BIT(12)
+#define SCTLR_M_BIT BIT(0)
+#define SCTLR_A_BIT BIT(1)
+#define SCTLR_C_BIT BIT(2)
+#define SCTLR_SA_BIT BIT(3)
+#define SCTLR_I_BIT BIT(12)
#define CPACR_EL1_FPEN_NOTRAP (0x3 << 20)
-#define SCR_EL3_NS BIT(0)
-#define SCR_EL3_IRQ BIT(1)
-#define SCR_EL3_FIQ BIT(2)
-#define SCR_EL3_EA BIT(3)
-#define SCR_EL3_RW BIT(10)
+#define SCR_NS_BIT BIT(0)
+#define SCR_IRQ_BIT BIT(1)
+#define SCR_FIQ_BIT BIT(2)
+#define SCR_EA_BIT BIT(3)
+#define SCR_SMD_BIT BIT(7)
+#define SCR_HCE_BIT BIT(8)
+#define SCR_RW_BIT BIT(10)
+#define SCR_ST_BIT BIT(11)
+#define SCR_RES1 (BIT(4) | BIT(5))
+/* MPIDR */
+#define MPIDR_AFFLVL_MASK (0xff)
+#define MPIDR_AFF0_SHIFT (0)
+#define MPIDR_AFF1_SHIFT (8)
+#define MPIDR_AFF2_SHIFT (16)
+#define MPIDR_AFF3_SHIFT (32)
+#define MPIDR_AFFLVL(mpidr, aff_level) \
+(((mpidr) >> MPIDR_AFF##aff_level##_SHIFT) & MPIDR_AFFLVL_MASK)
+#define GET_MPIDR() read_sysreg(mpidr_el1)
+#define MPIDR_TO_CORE(mpidr) MPIDR_AFFLVL(mpidr, 0)
+#define MODE_EL_SHIFT (0x2)
+#define MODE_EL_MASK (0x3)
+#define MODE_EL3 (0x3)
+#define MODE_EL2 (0x2)
+#define MODE_EL1 (0x1)
+#define MODE_EL0 (0x0)
+#define GET_EL(_mode) (((_mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)
+#define ESR_EC_SHIFT (26)
+#define ESR_EC_MASK BIT_MASK(6)
+#define ESR_ISS_SHIFT (0)
+#define ESR_ISS_MASK BIT_MASK(25)
+#define ESR_IL_SHIFT (25)
+#define ESR_IL_MASK BIT_MASK(1)
+#define GET_ESR_EC(esr) (((esr) >> ESR_EC_SHIFT) & ESR_EC_MASK)
+#define GET_ESR_IL(esr) (((esr) >> ESR_IL_SHIFT) & ESR_IL_MASK)
+#define GET_ESR_ISS(esr) (((esr) >> ESR_ISS_SHIFT) & ESR_ISS_MASK)
+#define CNTV_CTL_ENABLE_BIT BIT(0)
+#define CNTV_CTL_IMASK_BIT BIT(1)
#define ID_AA64PFR0_EL0_SHIFT (0)
#define ID_AA64PFR0_EL1_SHIFT (4)
#define ID_AA64PFR0_EL2_SHIFT (8)
#define ID_AA64PFR0_EL3_SHIFT (12)
#define ID_AA64PFR0_ELX_MASK (0xf)
#define ID_AA64PFR0_SEL2_SHIFT (36)
#define ID_AA64PFR0_SEL2_MASK (0xf)
/*
* TODO: ACTLR is of class implementation defined. All core implementations
* in armv8a have the same implementation so far w.r.t few controls.
* When there will be differences we have to create core specific headers.
*/
-#define ACTLR_EL3_CPUACTLR BIT(0)
-#define ACTLR_EL3_CPUECTLR BIT(1)
-#define ACTLR_EL3_L2CTLR BIT(4)
-#define ACTLR_EL3_L2ECTLR BIT(5)
-#define ACTLR_EL3_L2ACTLR BIT(6)
+#define ACTLR_EL3_CPUACTLR_BIT BIT(0)
+#define ACTLR_EL3_CPUECTLR_BIT BIT(1)
+#define ACTLR_EL3_L2CTLR_BIT BIT(4)
+#define ACTLR_EL3_L2ECTLR_BIT BIT(5)
+#define ACTLR_EL3_L2ACTLR_BIT BIT(6)
#define CPTR_EL3_RES_VAL (0x0)
-#define CPTR_EL3_EZ BIT(8)
-#define CPTR_EL3_TFP BIT(9)
-#define CPTR_EL3_TTA BIT(20)
-#define CPTR_EL3_TCPAC BIT(31)
+#define CPTR_EZ_BIT BIT(8)
+#define CPTR_TFP_BIT BIT(10)
+#define CPTR_TTA_BIT BIT(20)
+#define CPTR_TCPAC_BIT BIT(31)
-#define HCR_EL2_FMO BIT(3)
-#define HCR_EL2_IMO BIT(4)
-#define HCR_EL2_AMO BIT(5)
#define CPTR_EL2_RES1 BIT(13) | BIT(12) | BIT(9) | (0xff)
-#define SPSR_EL3_h BIT(0)
-#define SPSR_EL3_TO_EL1 (0x2 << 1)
+#define HCR_FMO_BIT BIT(3)
+#define HCR_IMO_BIT BIT(4)
+#define HCR_AMO_BIT BIT(5)
+#define HCR_RW_BIT BIT(31)
/* System register interface to GICv3 */
#define ICC_IGRPEN1_EL1 S3_0_C12_C12_7
@ -89,23 +154,22 @@ @@ -89,23 +154,22 @@
#define ICC_SGI0R_EL1 S3_0_C12_C11_7
/* register constants */
-#define ICC_SRE_ELx_SRE BIT(0)
-#define ICC_SRE_ELx_DFB BIT(1)
-#define ICC_SRE_ELx_DIB BIT(2)
-#define ICC_SRE_EL3_EN BIT(3)
+#define ICC_SRE_ELx_SRE_BIT BIT(0)
+#define ICC_SRE_ELx_DFB_BIT BIT(1)
+#define ICC_SRE_ELx_DIB_BIT BIT(2)
+#define ICC_SRE_EL3_EN_BIT BIT(3)
/* ICC SGI macros */
-#define SGIR_TGT_MASK 0xffff
-#define SGIR_AFF1_SHIFT 16
-#define SGIR_INTID_SHIFT 24
-#define SGIR_INTID_MASK 0xf
-#define SGIR_AFF2_SHIFT 32
-#define SGIR_IRM_SHIFT 40
-#define SGIR_IRM_MASK 0x1
-#define SGIR_AFF3_SHIFT 48
-#define SGIR_AFF_MASK 0xf
-#define SGIR_IRM_TO_AFF 0
+#define SGIR_TGT_MASK (0xffff)
+#define SGIR_AFF1_SHIFT (16)
+#define SGIR_AFF2_SHIFT (32)
+#define SGIR_AFF3_SHIFT (48)
+#define SGIR_AFF_MASK (0xf)
+#define SGIR_INTID_SHIFT (24)
+#define SGIR_INTID_MASK (0xf)
+#define SGIR_IRM_SHIFT (40)
+#define SGIR_IRM_MASK (0x1)
+#define SGIR_IRM_TO_AFF (0)
#define GICV3_SGIR_VALUE(_aff3, _aff2, _aff1, _intid, _irm, _tgt) \
((((uint64_t) (_aff3) & SGIR_AFF_MASK) << SGIR_AFF3_SHIFT) | \
@@ -119,26 +183,26 @@
#if defined(CONFIG_CPU_CORTEX_A72)
#define CORTEX_A72_L2CTLR_EL1 S3_1_C11_C0_2
-#define CORTEX_A72_L2CTLR_DATA_RAM_LATENCY_SHIFT 0
-#define CORTEX_A72_L2CTLR_DATA_RAM_SETUP_SHIFT 5
-#define CORTEX_A72_L2CTLR_TAG_RAM_LATENCY_SHIFT 6
-#define CORTEX_A72_L2CTLR_TAG_RAM_SETUP_SHIFT 9
-#define CORTEX_A72_L2_DATA_RAM_LATENCY_3_CYCLES 2
-#define CORTEX_A72_L2_DATA_RAM_LATENCY_MASK 7
-#define CORTEX_A72_L2_DATA_RAM_SETUP_1_CYCLE 1
-#define CORTEX_A72_L2_TAG_RAM_LATENCY_2_CYCLES 1
-#define CORTEX_A72_L2_TAG_RAM_LATENCY_3_CYCLES 2
-#define CORTEX_A72_L2_TAG_RAM_LATENCY_MASK 7
-#define CORTEX_A72_L2_TAG_RAM_SETUP_1_CYCLE 1
+#define CORTEX_A72_L2CTLR_DATA_RAM_LATENCY_SHIFT (0)
+#define CORTEX_A72_L2CTLR_DATA_RAM_SETUP_SHIFT (5)
+#define CORTEX_A72_L2CTLR_TAG_RAM_LATENCY_SHIFT (6)
+#define CORTEX_A72_L2CTLR_TAG_RAM_SETUP_SHIFT (9)
+#define CORTEX_A72_L2_DATA_RAM_LATENCY_3_CYCLES (2)
+#define CORTEX_A72_L2_DATA_RAM_LATENCY_MASK (0x7)
+#define CORTEX_A72_L2_DATA_RAM_SETUP_1_CYCLE (1)
+#define CORTEX_A72_L2_TAG_RAM_LATENCY_2_CYCLES (1)
+#define CORTEX_A72_L2_TAG_RAM_LATENCY_3_CYCLES (2)
+#define CORTEX_A72_L2_TAG_RAM_LATENCY_MASK (0x7)
+#define CORTEX_A72_L2_TAG_RAM_SETUP_1_CYCLE (1)
#define CORTEX_A72_L2ACTLR_EL1 S3_1_C15_C0_0
-#define CORTEX_A72_L2ACTLR_DISABLE_ACE_SH_OR_CHI (1 << 6)
+#define CORTEX_A72_L2ACTLR_DISABLE_ACE_SH_OR_CHI_BIT BIT(6)
#endif /* CONFIG_CPU_CORTEX_A72 */
#ifndef _ASMLANGUAGE
/* Core sysreg macros */
#define read_sysreg(reg) ({ \
uint64_t val; \
@@ -152,31 +216,4 @@
#endif /* !_ASMLANGUAGE */
-#define MODE_EL_SHIFT (0x2)
-#define MODE_EL_MASK (0x3)
-#define MODE_EL3 (0x3)
-#define MODE_EL2 (0x2)
-#define MODE_EL1 (0x1)
-#define MODE_EL0 (0x0)
-#define GET_EL(_mode) (((_mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)
-#define ESR_EC(esr) (((esr) >> 26) & BIT_MASK(6))
-#define ESR_IL(esr) (((esr) >> 25) & BIT_MASK(1))
-#define ESR_ISS(esr) ((esr) & BIT_MASK(25))
-/* mpidr */
-#define MPIDR_AFFLVL_MASK 0xff
-#define MPIDR_AFF0_SHIFT 0
-#define MPIDR_AFF1_SHIFT 8
-#define MPIDR_AFF2_SHIFT 16
-#define MPIDR_AFF3_SHIFT 32
-#define MPIDR_AFFLVL(mpidr, aff_level) \
-(((mpidr) >> MPIDR_AFF##aff_level##_SHIFT) & MPIDR_AFFLVL_MASK)
-#define GET_MPIDR() read_sysreg(mpidr_el1)
-#define MPIDR_TO_CORE(mpidr) MPIDR_AFFLVL(mpidr, 0)
#endif /* ZEPHYR_INCLUDE_ARCH_ARM_AARCH64_CPU_H_ */
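The MPIDR helpers moved to the top of the file are unchanged in behaviour: each affinity level is an 8-bit field selected by MPIDR_AFF<n>_SHIFT. A small sketch of their use (illustration only; the sample MPIDR value is assumed):

static void mpidr_example(void)
{
	uint64_t mpidr = GET_MPIDR();   /* reads mpidr_el1 */

	/* e.g. mpidr == 0x80000201: Aff0 (core) = 0x01, Aff1 (cluster) = 0x02 */
	unsigned int core    = MPIDR_TO_CORE(mpidr);   /* MPIDR_AFFLVL(mpidr, 0) */
	unsigned int cluster = MPIDR_AFFLVL(mpidr, 1);

	ARG_UNUSED(core);
	ARG_UNUSED(cluster);
}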

include/arch/arm/aarch64/timer.h (11)

@@ -20,9 +20,6 @@ extern "C" {
#define ARM_ARCH_TIMER_PRIO ARM_TIMER_VIRTUAL_PRIO
#define ARM_ARCH_TIMER_FLAGS ARM_TIMER_VIRTUAL_FLAGS
-#define CNTV_CTL_ENABLE ((1) << 0)
-#define CNTV_CTL_IMASK ((1) << 1)
static ALWAYS_INLINE void arm_arch_timer_init(void)
{
}
@@ -41,9 +38,9 @@ static ALWAYS_INLINE void arm_arch_timer_enable(unsigned char enable)
: "=r" (cntv_ctl) : : "memory");
if (enable) {
-cntv_ctl |= CNTV_CTL_ENABLE;
+cntv_ctl |= CNTV_CTL_ENABLE_BIT;
} else {
-cntv_ctl &= ~CNTV_CTL_ENABLE;
+cntv_ctl &= ~CNTV_CTL_ENABLE_BIT;
}
__asm__ volatile("msr cntv_ctl_el0, %0\n\t"
@@ -58,9 +55,9 @@ static ALWAYS_INLINE void arm_arch_timer_set_irq_mask(bool mask)
: "=r" (cntv_ctl) : : "memory");
if (mask) {
-cntv_ctl |= CNTV_CTL_IMASK;
+cntv_ctl |= CNTV_CTL_IMASK_BIT;
} else {
-cntv_ctl &= ~CNTV_CTL_IMASK;
+cntv_ctl &= ~CNTV_CTL_IMASK_BIT;
}
__asm__ volatile("msr cntv_ctl_el0, %0\n\t"

soc/arm/bcm_vk/viper/plat_core.S (6)

@@ -22,8 +22,8 @@ SECTION_FUNC(TEXT, z_arch_el3_plat_init)
mov x20, x30
/* Enable GIC v3 system interface */
-mov_imm x0, (ICC_SRE_ELx_DFB | ICC_SRE_ELx_DIB | \
-ICC_SRE_ELx_SRE | ICC_SRE_EL3_EN)
+mov_imm x0, (ICC_SRE_ELx_DFB_BIT | ICC_SRE_ELx_DIB_BIT | \
+ICC_SRE_ELx_SRE_BIT | ICC_SRE_EL3_EN_BIT)
msr ICC_SRE_EL3, x0
/* L2 config */
bl plat_l2_init
@@ -38,7 +38,7 @@ SECTION_FUNC(TEXT,plat_l2_init)
*/
/* Disable cluster coherency */
mrs x0, CORTEX_A72_L2ACTLR_EL1
-orr x0, x0, #CORTEX_A72_L2ACTLR_DISABLE_ACE_SH_OR_CHI
+orr x0, x0, #CORTEX_A72_L2ACTLR_DISABLE_ACE_SH_OR_CHI_BIT
msr CORTEX_A72_L2ACTLR_EL1, x0
/* Set L2 Control Register */

soc/arm/qemu_cortex_a53/plat_core.S (4)

@@ -23,8 +23,8 @@ SECTION_FUNC(TEXT, z_arch_el3_plat_init)
#ifdef CONFIG_GIC_V3
/* Enable GIC v3 system interface */
-mov_imm x0, (ICC_SRE_ELx_DFB | ICC_SRE_ELx_DIB | \
-ICC_SRE_ELx_SRE | ICC_SRE_EL3_EN)
+mov_imm x0, (ICC_SRE_ELx_DFB_BIT | ICC_SRE_ELx_DIB_BIT | \
+ICC_SRE_ELx_SRE_BIT | ICC_SRE_EL3_EN_BIT)
msr ICC_SRE_EL3, x0
#endif
