Browse Source

arch: add MIPS architecture support

MIPS (Microprocessor without Interlocked Pipelined Stages) is an
instruction set architecture (ISA) developed by MIPS Computer
Systems, now MIPS Technologies.

This commit provides MIPS architecture support to Zephyr. It is
compatible with the MIPS32 Release 1 specification.

Signed-off-by: Antony Pavlov <antonynpavlov@gmail.com>
pull/41985/head
Antony Pavlov 5 years ago committed by Anas Nashif
parent
commit
0369998e61
  1. 2
      CODEOWNERS
  2. 8
      arch/Kconfig
  3. 16
      arch/mips/CMakeLists.txt
  4. 59
      arch/mips/Kconfig
  5. 16
      arch/mips/core/CMakeLists.txt
  6. 30
      arch/mips/core/cpu_idle.c
  7. 97
      arch/mips/core/fatal.c
  8. 106
      arch/mips/core/irq_manage.c
  9. 50
      arch/mips/core/irq_offload.c
  10. 285
      arch/mips/core/isr.S
  11. 54
      arch/mips/core/offsets/offsets.c
  12. 53
      arch/mips/core/prep_c.c
  13. 57
      arch/mips/core/reset.S
  14. 48
      arch/mips/core/swap.S
  15. 41
      arch/mips/core/thread.c
  16. 39
      arch/mips/include/kernel_arch_data.h
  17. 55
      arch/mips/include/kernel_arch_func.h
  18. 51
      arch/mips/include/mips/mipsregs.h
  19. 72
      arch/mips/include/mips/regdef.h
  20. 47
      arch/mips/include/offsets_short_arch.h
  21. 2
      include/arch/cpu.h
  22. 117
      include/arch/mips/arch.h
  23. 61
      include/arch/mips/exp.h
  24. 209
      include/arch/mips/linker.ld
  25. 54
      include/arch/mips/thread.h
  26. 2
      include/linker/linker-tool-gcc.h
  27. 2
      include/toolchain/common.h
  28. 6
      include/toolchain/gcc.h
  29. 2
      lib/libc/minimal/include/sys/types.h
  30. 2
      lib/os/cbprintf_packaged.c
  31. 3
      scripts/logging/dictionary/dictionary_parser/log_database.py
  32. 3
      subsys/debug/thread_info.c
  33. 2
      subsys/logging/log_core.c
  34. 8
      subsys/testsuite/include/interrupt_util.h

2
CODEOWNERS

@ -25,6 +25,7 @@
/arch/arm64/core/cortex_r/ @povergoing /arch/arm64/core/cortex_r/ @povergoing
/arch/arm64/core/xen/ @lorc @firscity /arch/arm64/core/xen/ @lorc @firscity
/arch/common/ @ioannisg @andyross /arch/common/ @ioannisg @andyross
/arch/mips/ @frantony
/soc/arc/snps_*/ @abrodkin @ruuddw @evgeniy-paltsev /soc/arc/snps_*/ @abrodkin @ruuddw @evgeniy-paltsev
/soc/nios2/ @nashif /soc/nios2/ @nashif
/soc/arm/ @MaureenHelm @galak @ioannisg /soc/arm/ @MaureenHelm @galak @ioannisg
@ -533,6 +534,7 @@
/include/arch/arm64/ @carlocaione /include/arch/arm64/ @carlocaione
/include/arch/arm64/cortex_r/ @povergoing /include/arch/arm64/cortex_r/ @povergoing
/include/arch/arm/aarch32/irq.h @carlocaione /include/arch/arm/aarch32/irq.h @carlocaione
/include/arch/mips/ @frantony
/include/arch/nios2/ @nashif /include/arch/nios2/ @nashif
/include/arch/nios2/arch.h @nashif /include/arch/nios2/arch.h @nashif
/include/arch/posix/ @aescolar @daor-oti /include/arch/posix/ @aescolar @daor-oti

8
arch/Kconfig

@ -49,6 +49,14 @@ config ARM64
help help
ARM64 (AArch64) architecture ARM64 (AArch64) architecture
config MIPS
bool
select ARCH_IS_SET
select ATOMIC_OPERATIONS_C
select HAS_DTS
help
MIPS architecture
config SPARC config SPARC
bool bool
select ARCH_IS_SET select ARCH_IS_SET

16
arch/mips/CMakeLists.txt

@ -0,0 +1,16 @@
#
# Copyright (c) 2020 Antony Pavlov <antonynpavlov@gmail.com>
#
# based on arch/riscv/CMakeLists.txt
#
# SPDX-License-Identifier: Apache-2.0
#

# Select the output ELF flavour to match the configured endianness;
# both MIPS32 byte orders are supported by the toolchain.
if(CONFIG_BIG_ENDIAN)
set_property(GLOBAL PROPERTY PROPERTY_OUTPUT_FORMAT "elf32-bigmips")
else()
set_property(GLOBAL PROPERTY PROPERTY_OUTPUT_FORMAT "elf32-littlemips")
endif()

# Arch core sources live in core/; private headers in include/.
add_subdirectory(core)
zephyr_include_directories(include)

59
arch/mips/Kconfig

@ -0,0 +1,59 @@
#
# Copyright (c) 2020 Antony Pavlov <antonynpavlov@gmail.com>
#
# based on arch/riscv/Kconfig
#
# SPDX-License-Identifier: Apache-2.0
#

menu "MIPS Options"
	depends on MIPS

config ARCH
	string
	default "mips"

# Interrupt dispatch uses only the generated software ISR table;
# there is no per-vector hardware table on this core.
config GEN_ISR_TABLES
	default y

config GEN_IRQ_VECTOR_TABLE
	default n

config GEN_SW_ISR_TABLE
	default y

# NOTE(review): no default given here -- each SoC/board Kconfig is
# expected to set the interrupt count.
config NUM_IRQS
	int

# Bump the kernel default stack size values.
config MAIN_STACK_SIZE
	default 4096 if COVERAGE_GCOV
	default 2048

config IDLE_STACK_SIZE
	default 1024

config ISR_STACK_SIZE
	default 4096

config TEST_EXTRA_STACKSIZE
	default 4096 if COVERAGE_GCOV
	default 2048

config SYSTEM_WORKQUEUE_STACK_SIZE
	default 4096

config CMSIS_THREAD_MAX_STACK_SIZE
	default 2048

config CMSIS_V2_THREAD_MAX_STACK_SIZE
	default 2048

config CMSIS_V2_THREAD_DYNAMIC_STACK_SIZE
	default 2048

config IPM_CONSOLE_STACK_SIZE
	default 4096 if COVERAGE
	default 1024

endmenu

16
arch/mips/core/CMakeLists.txt

@ -0,0 +1,16 @@
# SPDX-License-Identifier: Apache-2.0

# MIPS arch core: C sources plus the assembly reset/exception/context
# switch entry points.
zephyr_library()

zephyr_library_sources(
  cpu_idle.c
  fatal.c
  irq_manage.c
  isr.S
  prep_c.c
  reset.S
  swap.S
  thread.c
)

# IRQ offload (running a routine in interrupt context) is optional.
zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD irq_offload.c)

30
arch/mips/core/cpu_idle.c

@ -0,0 +1,30 @@
/*
* Copyright (c) 2020 Antony Pavlov <antonynpavlov@gmail.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <irq.h>
#include <tracing/tracing.h>
/*
 * Common idle path: restore the interrupt lock state given by @key,
 * then stall the pipeline until the next interrupt using the MIPS32
 * "wait" instruction.
 *
 * @param key interrupt lock state to restore before waiting
 */
static ALWAYS_INLINE void mips_idle(unsigned int key)
{
	sys_trace_idle();
	/* unlock interrupts */
	irq_unlock(key);
	/* wait for interrupt */
	__asm__ volatile("wait");
}
/*
 * Idle the CPU with interrupts enabled: key==1 tells mips_idle() that
 * interrupts were enabled, so irq_unlock() re-enables them before the
 * "wait".
 */
void arch_cpu_idle(void)
{
	mips_idle(1);
}
/*
 * Idle the CPU, restoring the caller's interrupt lock state (@key)
 * before waiting.
 *
 * NOTE(review): the unlock and the "wait" in mips_idle() are two
 * separate instructions, not a single atomic step -- confirm this
 * satisfies arch_cpu_atomic_idle()'s race-free wakeup requirement.
 */
void arch_cpu_atomic_idle(unsigned int key)
{
	mips_idle(key);
}

97
arch/mips/core/fatal.c

@ -0,0 +1,97 @@
/*
* Copyright (c) 2020 Antony Pavlov <antonynpavlov@gmail.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel.h>
#include <sys/printk.h>
#include <logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
/*
 * Arch fatal-error path: dump the saved exception frame (when one is
 * available) and hand off to the kernel's common z_fatal_error().
 *
 * @param reason one of the K_ERR_* fatal reason codes
 * @param esf    saved exception stack frame, or NULL if unavailable
 */
FUNC_NORETURN void z_mips_fatal_error(unsigned int reason,
	const z_arch_esf_t *esf)
{
	if (esf != NULL) {
		/*
		 * Classic MIPS four-registers-per-row dump. sp, s8 and
		 * the s0..s7 row ("...") are callee-saved and not part
		 * of struct __esf, hence the gaps.
		 */
		printk("$ 0 : (ze) %08lx(at) %08lx(v0) %08lx(v1)\n",
			esf->at, esf->v0, esf->v1);
		printk("$ 4 : %08lx(a0) %08lx(a1) %08lx(a2) %08lx(a3)\n",
			esf->a0, esf->a1, esf->a2, esf->a3);
		printk("$ 8 : %08lx(t0) %08lx(t1) %08lx(t2) %08lx(t3)\n",
			esf->t0, esf->t1, esf->t2, esf->t3);
		printk("$12 : %08lx(t4) %08lx(t5) %08lx(t6) %08lx(t7)\n",
			esf->t4, esf->t5, esf->t6, esf->t7);
		printk("...\n");
		printk("$24 : %08lx(t8) %08lx(t9)\n",
			esf->t8, esf->t9);
		printk("$28 : %08lx(gp) (sp) (s8) %08lx(ra)\n",
			esf->gp, esf->ra);

		printk("EPC : %08lx\n", esf->epc);
		printk("Status: %08lx\n", esf->status);
		printk("Cause : %08lx\n", esf->cause);
		printk("BadVA : %08lx\n", esf->badvaddr);
	}

	z_fatal_error(reason, esf);
	CODE_UNREACHABLE;
}
/*
 * Map a CP0 Cause ExcCode value to a human-readable description.
 *
 * Returns const-qualified pointers: the strings are literals and must
 * never be written through the returned pointer.
 *
 * @param cause ExcCode extracted from the CP0 Cause register
 *              ((cause & CAUSE_EXP_MASK) >> CAUSE_EXP_SHIFT)
 * @return static string describing the exception, "unknown" otherwise
 */
static const char *cause_str(ulong_t cause)
{
	switch (cause) {
	case 0:
		return "interrupt pending";
	case 1:
		return "TLB modified";
	case 2:
		return "TLB miss on load or ifetch";
	case 3:
		return "TLB miss on store";
	case 4:
		return "address error on load or ifetch";
	case 5:
		return "address error on store";
	case 6:
		return "bus error on ifetch";
	case 7:
		return "bus error on load or store";
	case 8:
		return "system call";
	case 9:
		return "breakpoint";
	case 10:
		return "reserved instruction";
	case 11:
		return "coprocessor unusable";
	case 12:
		return "arithmetic overflow";
	case 13:
		return "trap instruction";
	case 14:
		return "virtual coherency instruction";
	case 15:
		return "floating point";
	case 16:
		return "iwatch";
	case 23:
		return "dwatch";
	case 31:
		return "virtual coherency data";
	default:
		return "unknown";
	}
}
/*
 * Common CPU exception handler, reached from the assembly exception
 * entry (the "unhandled" path in isr.S) for anything that is neither
 * an interrupt nor a syscall.
 *
 * @param esf exception frame saved on the faulting thread's stack
 */
void _Fault(z_arch_esf_t *esf)
{
	ulong_t cause;

	/* ExcCode field of the CP0 Cause register */
	cause = (read_c0_cause() & CAUSE_EXP_MASK) >> CAUSE_EXP_SHIFT;

	LOG_ERR("");
	LOG_ERR(" cause: %ld, %s", cause, cause_str(cause));

	z_mips_fatal_error(K_ERR_CPU_EXCEPTION, esf);
}

106
arch/mips/core/irq_manage.c

@ -0,0 +1,106 @@
/*
* Copyright (c) 2020 Antony Pavlov <antonynpavlov@gmail.com>
*
* based on arch/nios2/core/irq_manage.c
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel.h>
#include <kswap.h>
#include <logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
/*
 * IM (interrupt mask) bits currently enabled in CP0 Status. Kept in
 * sync by arch_irq_enable()/arch_irq_disable() and OR'ed into the
 * initial Status word of every new thread (see arch_new_thread()).
 */
uint32_t mips_cp0_status_int_mask;

/*
 * Default handler for interrupt lines with no registered ISR: log the
 * Cause ExcCode and take the fatal-error path. Never returns.
 *
 * @param unused unused argument slot from the ISR table
 */
FUNC_NORETURN void z_irq_spurious(const void *unused)
{
	ulong_t cause;

	ARG_UNUSED(unused);

	cause = (read_c0_cause() & CAUSE_EXP_MASK) >> CAUSE_EXP_SHIFT;

	LOG_ERR("Spurious interrupt detected! CAUSE: %ld", cause);

	z_mips_fatal_error(K_ERR_SPURIOUS_IRQ, NULL);
}
/*
 * Enable an interrupt line.
 *
 * Sets the corresponding IM bit both in the live CP0 Status register
 * and in the software mask used to build new threads' initial Status.
 *
 * @param irq interrupt line number (0-based; maps to ST0_IP0 << irq)
 */
void arch_irq_enable(unsigned int irq)
{
	const uint32_t bit = ST0_IP0 << irq;
	unsigned int lock_key = irq_lock();

	mips_cp0_status_int_mask |= bit;
	write_c0_status(read_c0_status() | bit);

	irq_unlock(lock_key);
}
/*
 * Disable an interrupt line.
 *
 * Clears the corresponding IM bit in the live CP0 Status register and
 * in the software mask used to build new threads' initial Status.
 *
 * Fix: dropped the stray ';' that followed the closing brace (an empty
 * file-scope declaration; rejected/warned by -pedantic builds).
 *
 * @param irq interrupt line number (0-based; maps to ST0_IP0 << irq)
 */
void arch_irq_disable(unsigned int irq)
{
	unsigned int key;
	uint32_t irq_mask;

	key = irq_lock();
	irq_mask = ST0_IP0 << irq;
	mips_cp0_status_int_mask &= ~irq_mask;
	write_c0_status(read_c0_status() & ~irq_mask);
	irq_unlock(key);
}
/*
 * Test whether an interrupt line is enabled.
 *
 * @param irq interrupt line number
 * @return non-zero (the raw IM bit, not necessarily 1) when enabled
 */
int arch_irq_is_enabled(unsigned int irq)
{
	return read_c0_status() & (ST0_IP0 << irq);
}
/*
 * C-level interrupt dispatch, called from the assembly exception entry
 * (isr.S) while running on the interrupt stack.
 *
 * Fix: braced all single-statement if bodies per the Zephyr coding
 * style (unbraced bodies are a known hazard; see "goto fail").
 *
 * @param ipending bitmask of pending interrupt lines
 *                 ((Cause & Status & CAUSE_IP_MASK) >> CAUSE_IP_SHIFT);
 *                 0 when entered for an IRQ-offload syscall
 */
void z_mips_enter_irq(uint32_t ipending)
{
	_current_cpu->nested++;

#ifdef CONFIG_IRQ_OFFLOAD
	/* No-op unless an offload routine is actually pending */
	z_irq_do_offload();
#endif

	/* Dispatch every pending line, lowest bit first */
	while (ipending) {
		int index;
		struct _isr_table_entry *ite;

		if (IS_ENABLED(CONFIG_TRACING_ISR)) {
			sys_trace_isr_enter();
		}

		index = find_lsb_set(ipending) - 1;
		ipending &= ~BIT(index);

		ite = &_sw_isr_table[index];
		ite->isr(ite->arg);

		if (IS_ENABLED(CONFIG_TRACING_ISR)) {
			sys_trace_isr_exit();
		}
	}

	_current_cpu->nested--;

	if (IS_ENABLED(CONFIG_STACK_SENTINEL)) {
		z_check_stack_sentinel();
	}
}
#ifdef CONFIG_DYNAMIC_INTERRUPTS
/*
 * Install an interrupt service routine at runtime.
 *
 * @param irq       interrupt line number (also returned as the vector)
 * @param priority  ignored -- this port has no interrupt priorities
 * @param routine   ISR to install in the software ISR table
 * @param parameter argument passed to @routine
 * @param flags     ignored
 * @return the IRQ number, for use as the "vector"
 */
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
	void (*routine)(const void *parameter),
	const void *parameter, uint32_t flags)
{
	ARG_UNUSED(flags);
	ARG_UNUSED(priority);

	z_isr_install(irq, routine, parameter);
	return irq;
}
#endif /* CONFIG_DYNAMIC_INTERRUPTS */

50
arch/mips/core/irq_offload.c

@ -0,0 +1,50 @@
/*
* Copyright (c) 2020 Antony Pavlov <antonynpavlov@gmail.com>
*
* based on arch/riscv/core/irq_offload.c
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel.h>
#include <kernel_structs.h>
#include <kernel_internal.h>
#include <irq.h>
#include <irq_offload.h>
/*
 * Pending IRQ-offload request. _offload_routine is also polled from
 * the assembly syscall path in isr.S (bnez on its value) to decide
 * whether a syscall means "offload" or "context switch".
 */
volatile irq_offload_routine_t _offload_routine;
static volatile const void *offload_param;

/*
 * Called by z_mips_enter_irq()
 *
 * Just in case the offload routine itself generates an unhandled
 * exception, clear the offload_routine global before executing.
 */
void z_irq_do_offload(void)
{
	irq_offload_routine_t tmp;

	if (!_offload_routine) {
		return;
	}

	tmp = _offload_routine;
	_offload_routine = NULL;

	tmp((const void *)offload_param);
}
/*
 * Run @routine once in interrupt context.
 *
 * Records the request in the globals above, then executes "syscall";
 * the exception handler in isr.S sees the pending routine and invokes
 * z_irq_do_offload() from ISR context before returning here.
 *
 * @param routine   function to run in interrupt context
 * @param parameter argument handed to @routine
 */
void arch_irq_offload(irq_offload_routine_t routine, const void *parameter)
{
	unsigned int key;

	key = irq_lock();
	_offload_routine = routine;
	offload_param = parameter;

	/* Generate irq offload trap */
	__asm__ volatile ("syscall");

	irq_unlock(key);
}

285
arch/mips/core/isr.S

@ -0,0 +1,285 @@
/*
* Copyright (c) 2021 Antony Pavlov <antonynpavlov@gmail.com>
*
* based on arch/riscv/core/isr.S and arch/nios2/core/exception.S
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <toolchain.h>
#include <kernel_structs.h>
#include <offsets_short.h>
#include <arch/cpu.h>
#include <mips/regdef.h>
#include <mips/mipsregs.h>
/* Offset of an ESF field / a thread callee-saved field, by name. */
#define ESF_O(FIELD) __z_arch_esf_t_##FIELD##_OFFSET
#define THREAD_O(FIELD) _thread_offset_to_##FIELD

/* Convenience macros for loading/storing register states. */

/*
 * Apply "op" (OP_LOADREG/OP_STOREREG) to every callee-saved register
 * s0..s8; "reg" holds the base address of the thread's callee-saved
 * area.
 */
#define DO_CALLEE_SAVED(op, reg) \
	op s0, THREAD_O(s0)(reg) ;\
	op s1, THREAD_O(s1)(reg) ;\
	op s2, THREAD_O(s2)(reg) ;\
	op s3, THREAD_O(s3)(reg) ;\
	op s4, THREAD_O(s4)(reg) ;\
	op s5, THREAD_O(s5)(reg) ;\
	op s6, THREAD_O(s6)(reg) ;\
	op s7, THREAD_O(s7)(reg) ;\
	op s8, THREAD_O(s8)(reg) ;

#define STORE_CALLEE_SAVED(reg) \
	DO_CALLEE_SAVED(OP_STOREREG, reg)

#define LOAD_CALLEE_SAVED(reg) \
	DO_CALLEE_SAVED(OP_LOADREG, reg)

/*
 * Apply "op" to every caller-saved register slot of the exception
 * stack frame (struct __esf) currently at the stack pointer.
 */
#define DO_CALLER_SAVED(op) \
	op ra, ESF_O(ra)(sp) ;\
	op gp, ESF_O(gp)(sp) ;\
	op AT, ESF_O(at)(sp) ;\
	op t0, ESF_O(t0)(sp) ;\
	op t1, ESF_O(t1)(sp) ;\
	op t2, ESF_O(t2)(sp) ;\
	op t3, ESF_O(t3)(sp) ;\
	op t4, ESF_O(t4)(sp) ;\
	op t5, ESF_O(t5)(sp) ;\
	op t6, ESF_O(t6)(sp) ;\
	op t7, ESF_O(t7)(sp) ;\
	op t8, ESF_O(t8)(sp) ;\
	op t9, ESF_O(t9)(sp) ;\
	op a0, ESF_O(a0)(sp) ;\
	op a1, ESF_O(a1)(sp) ;\
	op a2, ESF_O(a2)(sp) ;\
	op a3, ESF_O(a3)(sp) ;\
	op v0, ESF_O(v0)(sp) ;\
	op v1, ESF_O(v1)(sp) ;

/* Allocate an ESF on the current stack and fill it ... */
#define STORE_CALLER_SAVED() \
	addi sp, sp, -__z_arch_esf_t_SIZEOF ;\
	DO_CALLER_SAVED(OP_STOREREG) ;

/* ... and the inverse: drain the ESF and pop it off the stack. */
#define LOAD_CALLER_SAVED() \
	DO_CALLER_SAVED(OP_LOADREG) ;\
	addi sp, sp, __z_arch_esf_t_SIZEOF ;
/* imports */
GTEXT(_Fault)
GTEXT(_k_neg_eagain)
GTEXT(z_thread_mark_switched_in)
/* exports */
GTEXT(__isr_vec)
/*
 * Exception vector stub. interrupt_init() copies this to the general
 * exception vector (EBASE + 0x180), so it must be tiny and must reach
 * the real handler with an absolute jump through k0.
 */
SECTION_FUNC(exception.entry, __isr_vec)
	la k0, _mips_interrupt
	jr k0
/*
 * Common exception/interrupt handler.
 *
 * All exceptions funnel here from __isr_vec. Flow:
 *  1. save caller-saved GPRs and CP0 state in a struct __esf on the
 *     current thread stack,
 *  2. dispatch on Cause.ExcCode: syscall -> context switch (or IRQ
 *     offload), interrupt -> z_mips_enter_irq() on the IRQ stack,
 *     anything else -> _Fault(),
 *  3. context-switch if the ready-queue head differs from _current,
 *  4. restore state and return with eret.
 */
SECTION_FUNC(exception.other, _mips_interrupt)
	.set noat
	/*
	 * Save caller-saved registers on current thread stack.
	 */
	STORE_CALLER_SAVED()

	/* save CP0 registers */
	mfhi t0
	mflo t1
	OP_STOREREG t0, ESF_O(hi)(sp)
	OP_STOREREG t1, ESF_O(lo)(sp)
	mfc0 t0, CP0_EPC
	OP_STOREREG t0, ESF_O(epc)(sp)
	mfc0 t1, CP0_BADVADDR
	OP_STOREREG t1, ESF_O(badvaddr)(sp)
	mfc0 t0, CP0_STATUS
	OP_STOREREG t0, ESF_O(status)(sp)
	mfc0 t1, CP0_CAUSE
	OP_STOREREG t1, ESF_O(cause)(sp)

	/*
	 * Check if exception is the result of an interrupt or not.
	 * (k1 = Cause.ExcCode; t0 = Status, t1 = Cause from above)
	 */
	li k0, CAUSE_EXP_MASK
	and k1, k0, t1
	srl k1, k1, CAUSE_EXP_SHIFT

	/* ExcCode == 8 (SYSCALL) ? */
	li k0, 8
	beq k0, k1, is_kernel_syscall

	/* a0 = ((cause & status) & CAUSE_IP_MASK) >> CAUSE_IP_SHIFT */
	and t1, t1, t0
	li a0, CAUSE_IP_MASK
	and a0, a0, t1
	srl a0, a0, CAUSE_IP_SHIFT

	/* ExcCode == 0 (INTERRUPT) ? if not, go to unhandled */
	bnez k1, unhandled

	/* cause IP_MASK != 0 ? */
	bnez a0, is_interrupt

unhandled:
	/* Neither syscall nor pending interrupt: fatal fault path. */
	move a0, sp
	jal _Fault
	eret

is_kernel_syscall:
	/*
	 * A syscall is the result of an syscall instruction, in which case the
	 * EPC will contain the address of the syscall instruction.
	 * Increment saved EPC by 4 to prevent triggering the same syscall
	 * again upon exiting the ISR.
	 */
	OP_LOADREG k0, ESF_O(epc)(sp)
	addi k0, k0, 4
	OP_STOREREG k0, ESF_O(epc)(sp)

#ifdef CONFIG_IRQ_OFFLOAD
	/*
	 * Determine if the system call is the result of an IRQ offloading.
	 * Done by checking if _offload_routine is not pointing to NULL.
	 * If NULL, jump to reschedule to perform a context-switch, otherwise,
	 * jump to is_interrupt to handle the IRQ offload.
	 */
	la t0, _offload_routine
	OP_LOADREG t1, 0(t0)
	/*
	 * Put 0 into a0: call z_mips_enter_irq() with ipending==0
	 * to prevent spurious interrupt.
	 */
	move a0, zero
	bnez t1, is_interrupt
#endif /* CONFIG_IRQ_OFFLOAD */

	/*
	 * Go to reschedule to handle context-switch
	 */
	j reschedule

is_interrupt:
	/*
	 * Save current thread stack pointer and switch
	 * stack pointer to interrupt stack.
	 */

	/* Save thread stack pointer to temp register k0 */
	move k0, sp

	/*
	 * Switch to interrupt stack. NOTE: k1 keeps &_kernel and is
	 * still relied upon after the C handler returns (see the
	 * CONFIG_PREEMPT_ENABLED block at on_thread_stack).
	 */
	la k1, _kernel
	OP_LOADREG sp, _kernel_offset_to_irq_stack(k1)

	/*
	 * Save thread stack pointer on interrupt stack
	 */
	addi sp, sp, -16
	OP_STOREREG k0, 0(sp)

on_irq_stack:
	/*
	 * Enter C interrupt handling code. Value of ipending will be the
	 * function parameter since we put it in a0
	 */
	jal z_mips_enter_irq

on_thread_stack:
	/* Restore thread stack pointer */
	OP_LOADREG sp, 0(sp)

#ifdef CONFIG_PREEMPT_ENABLED
	/*
	 * Check if we need to perform a reschedule
	 */

	/* Get pointer to _kernel.current */
	OP_LOADREG t2, _kernel_offset_to_current(k1)

	/*
	 * Check if next thread to schedule is current thread.
	 * If yes do not perform a reschedule
	 */
	OP_LOADREG t3, _kernel_offset_to_ready_q_cache(k1)
	beq t3, t2, no_reschedule
#else
	j no_reschedule
#endif /* CONFIG_PREEMPT_ENABLED */

reschedule:
	/*
	 * Check if the current thread is the same as the thread on the ready Q. If
	 * so, do not reschedule.
	 * Note:
	 * Sometimes this code is execute back-to-back before the target thread
	 * has a chance to run. If this happens, the current thread and the
	 * target thread will be the same.
	 */
	la t0, _kernel
	OP_LOADREG t2, _kernel_offset_to_current(t0)
	OP_LOADREG t3, _kernel_offset_to_ready_q_cache(t0)
	beq t2, t3, no_reschedule

	/* Get reference to _kernel */
	la t0, _kernel

	/* Get pointer to _kernel.current */
	OP_LOADREG t1, _kernel_offset_to_current(t0)

	/*
	 * Save callee-saved registers of current kernel thread
	 * prior to handle context-switching
	 */
	STORE_CALLEE_SAVED(t1)

	/* NOTE(review): label appears unreferenced -- kept for symmetry
	 * with other ports; confirm before removing.
	 */
skip_callee_saved_reg:
	/*
	 * Save stack pointer of current thread and set the default return value
	 * of z_swap to _k_neg_eagain for the thread.
	 */
	OP_STOREREG sp, _thread_offset_to_sp(t1)
	la t2, _k_neg_eagain
	lw t3, 0(t2)
	sw t3, _thread_offset_to_swap_return_value(t1)

	/* Get next thread to schedule. */
	OP_LOADREG t1, _kernel_offset_to_ready_q_cache(t0)

	/*
	 * Set _kernel.current to new thread loaded in t1
	 */
	OP_STOREREG t1, _kernel_offset_to_current(t0)

	/* Switch to new thread stack */
	OP_LOADREG sp, _thread_offset_to_sp(t1)

	/* Restore callee-saved registers of new thread */
	LOAD_CALLEE_SAVED(t1)

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	jal z_thread_mark_switched_in
#endif

	/* fallthrough */

no_reschedule:
	/* restore CP0 */
	OP_LOADREG t1, ESF_O(hi)(sp)
	OP_LOADREG t2, ESF_O(lo)(sp)
	mthi t1
	mtlo t2

	OP_LOADREG k0, ESF_O(epc)(sp)
	mtc0 k0, CP0_EPC
	OP_LOADREG k1, ESF_O(status)(sp)
	mtc0 k1, CP0_STATUS
	ehb

	/* Restore caller-saved registers from thread stack */
	LOAD_CALLER_SAVED()

	/* exit ISR */
	eret

54
arch/mips/core/offsets/offsets.c

@ -0,0 +1,54 @@
/*
* Copyright (c) 2021 Antony Pavlov <antonynpavlov@gmail.com>
*
* based on arch/riscv/core/offsets/offsets.c
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel_arch_data.h>
#include <gen_offset.h>
#include <kernel_offsets.h>
/* Thread arch area: slot for the value arch_swap() returns. */
GEN_OFFSET_SYM(_thread_arch_t, swap_return_value);

/* Callee-saved context stored/restored on context switch (isr.S). */
GEN_OFFSET_SYM(_callee_saved_t, sp);
GEN_OFFSET_SYM(_callee_saved_t, s0);
GEN_OFFSET_SYM(_callee_saved_t, s1);
GEN_OFFSET_SYM(_callee_saved_t, s2);
GEN_OFFSET_SYM(_callee_saved_t, s3);
GEN_OFFSET_SYM(_callee_saved_t, s4);
GEN_OFFSET_SYM(_callee_saved_t, s5);
GEN_OFFSET_SYM(_callee_saved_t, s6);
GEN_OFFSET_SYM(_callee_saved_t, s7);
GEN_OFFSET_SYM(_callee_saved_t, s8);

/*
 * Exception stack frame (struct __esf) member offsets, consumed via
 * the ESF_O() macro in isr.S.
 */
GEN_OFFSET_SYM(z_arch_esf_t, ra);
GEN_OFFSET_SYM(z_arch_esf_t, gp);
GEN_OFFSET_SYM(z_arch_esf_t, t0);
GEN_OFFSET_SYM(z_arch_esf_t, t1);
GEN_OFFSET_SYM(z_arch_esf_t, t2);
GEN_OFFSET_SYM(z_arch_esf_t, t3);
GEN_OFFSET_SYM(z_arch_esf_t, t4);
GEN_OFFSET_SYM(z_arch_esf_t, t5);
GEN_OFFSET_SYM(z_arch_esf_t, t6);
GEN_OFFSET_SYM(z_arch_esf_t, t7);
GEN_OFFSET_SYM(z_arch_esf_t, t8);
GEN_OFFSET_SYM(z_arch_esf_t, t9);
GEN_OFFSET_SYM(z_arch_esf_t, a0);
GEN_OFFSET_SYM(z_arch_esf_t, a1);
GEN_OFFSET_SYM(z_arch_esf_t, a2);
GEN_OFFSET_SYM(z_arch_esf_t, a3);
GEN_OFFSET_SYM(z_arch_esf_t, v0);
GEN_OFFSET_SYM(z_arch_esf_t, v1);
GEN_OFFSET_SYM(z_arch_esf_t, at);
GEN_OFFSET_SYM(z_arch_esf_t, epc);
GEN_OFFSET_SYM(z_arch_esf_t, badvaddr);
GEN_OFFSET_SYM(z_arch_esf_t, hi);
GEN_OFFSET_SYM(z_arch_esf_t, lo);
GEN_OFFSET_SYM(z_arch_esf_t, status);
GEN_OFFSET_SYM(z_arch_esf_t, cause);

/* Total ESF size, rounded up to the 16-byte stack alignment. */
GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, STACK_ROUND_UP(sizeof(z_arch_esf_t)));

GEN_ABS_SYM_END

53
arch/mips/core/prep_c.c

@ -0,0 +1,53 @@
/*
* Copyright (c) 2020 Antony Pavlov <antonynpavlov@gmail.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Full C support initialization
*/
#include <kernel_internal.h>
#include <irq.h>
/*
 * Install the exception vector and prepare interrupt state.
 *
 * Copies the __isr_vec stub to the MIPS general exception vector at
 * ebase + 0x180 and clears Status.BEV so exceptions are taken from the
 * RAM vector rather than the boot ROM one.
 *
 * NOTE(review): ebase is hard-coded to the KSEG0 base 0x80000000,
 * i.e. the architectural default -- confirm for SoCs that relocate
 * EBASE.
 */
static void interrupt_init(void)
{
	extern char __isr_vec[];
	extern uint32_t mips_cp0_status_int_mask;
	unsigned long ebase;

	irq_lock();

	/* No interrupt lines enabled yet */
	mips_cp0_status_int_mask = 0;

	ebase = 0x80000000;

	/* 0x80 bytes is the vector slot size reserved for the stub */
	memcpy((void *)(ebase + 0x180), __isr_vec, 0x80);

	/*
	 * Disable boot exception vector in BOOTROM,
	 * use exception vector in RAM.
	 */
	write_c0_status(read_c0_status() & ~(ST0_BEV));
}
/**
 *
 * @brief Prepare to and run C code
 *
 * First C function after reset: zeroes .bss, installs the exception
 * vector, then starts the kernel. Never returns.
 *
 * NOTE(review): no data-section copy is performed here -- assumes the
 * image is loaded fully into RAM (no XIP); confirm if flash execution
 * is ever added.
 *
 * @return N/A
 */
void _PrepC(void)
{
	z_bss_zero();
	interrupt_init();
	z_cstart();
	CODE_UNREACHABLE;
}

57
arch/mips/core/reset.S

@ -0,0 +1,57 @@
/*
* Copyright (c) 2021 Antony Pavlov <antonynpavlov@gmail.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <toolchain.h>
#include <linker/sections.h>
#include <mips/regdef.h>
#include <mips/mipsregs.h>
GTEXT(__initialize)
GTEXT(__stack)
GTEXT(_PrepC)
/*
 * Remainder of asm-land initialization code before we can jump into
 * the C domain.
 */
SECTION_FUNC(TEXT, __initialize)
	.set noreorder

	/* Clear any stale state in the CP0 Cause register */
	mtc0 zero, CP0_CAUSE
	ehb

	/* Leave error level (ERL) and keep interrupts disabled until
	 * interrupt_init() runs from C
	 */
	mfc0 k0, CP0_STATUS
	li k1, ~(ST0_ERL | ST0_IE)
	and k0, k1
	mtc0 k0, CP0_STATUS
	ehb

#ifdef CONFIG_INIT_STACKS
	/* Pre-populate all bytes in z_interrupt_stacks with 0xAA */
	la t0, z_interrupt_stacks
	li t1, CONFIG_ISR_STACK_SIZE
	add t1, t1, t0

	/* Populate z_interrupt_stacks with 0xaaaaaaaa */
	li t2, 0xaaaaaaaa
aa_loop:
	sw t2, 0(t0)
	addi t0, t0, 4
	blt t0, t1, aa_loop
	nop /* delay slot */
#endif

	/*
	 * Setup stack pointer.
	 */
	la sp, __stack

	/*
	 * Jump into C domain.
	 */
	la v0, _PrepC
	jal v0
	nop /* delay slot */

48
arch/mips/core/swap.S

@ -0,0 +1,48 @@
/*
* Copyright (c) 2020, 2021 Antony Pavlov <antonynpavlov@gmail.com>
*
* based on arch/riscv/core/swap.S
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <toolchain.h>
#include <offsets_short.h>
#include <arch/cpu.h>
#include <mips/regdef.h>
/*
 * unsigned int arch_swap(unsigned int key)
 *
 * Always called with interrupts locked
 * key is stored in a0 register
 *
 * Triggers a "syscall" exception so the common handler in isr.S does
 * the actual context switch; execution resumes past the syscall once
 * this thread is switched back in, with its ESF fully restored.
 */
GTEXT(arch_swap)
SECTION_FUNC(exception.other, arch_swap)
	/* Make a system call to perform context switch */
	syscall

	/*
	 * when thread is rescheduled, unlock irq and return.
	 * Restored register v0 contains IRQ lock state of thread.
	 */
	la k0, _kernel

	/* Get pointer to _kernel.current */
	lw k1, _kernel_offset_to_current(k0)

	/* Load return value of arch_swap function in register v0 */
	lw v0, _thread_offset_to_swap_return_value(k1)

	/*
	 * Unlock irq, following IRQ lock state in v0 register.
	 * NOTE(review): the code actually ORs a0 (the key argument,
	 * 0 or 1 == ST0_IE) into Status, not v0 as the comment above
	 * says -- upstream comment/code mismatch; confirm intent.
	 */
	mfc0 k0, CP0_STATUS
	or k0, k0, a0
	mtc0 k0, CP0_STATUS
	ehb

	/* Return */
	jr ra

41
arch/mips/core/thread.c

@ -0,0 +1,41 @@
/*
* Copyright (c) 2021 Antony Pavlov <antonynpavlov@gmail.com>
*
* based on arch/riscv/core/thread.c
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel.h>
extern uint32_t mips_cp0_status_int_mask;
void z_thread_entry(k_thread_entry_t thread,
void *arg1,
void *arg2,
void *arg3);
/*
 * Create the initial context for a new thread.
 *
 * Builds a fake exception stack frame (struct __esf) at the top of the
 * thread's stack so the first context switch "returns" into
 * z_thread_entry(entry, p1, p2, p3) via eret.
 *
 * @param thread    thread object being initialized
 * @param stack     the thread's stack object
 * @param stack_ptr initial top-of-stack pointer
 * @param entry     thread entry function (becomes a0)
 * @param p1        first entry argument (a1)
 * @param p2        second entry argument (a2)
 * @param p3        third entry argument (a3)
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
	char *stack_ptr, k_thread_entry_t entry,
	void *p1, void *p2, void *p3)
{
	struct __esf *stack_init;

	/* Initial stack frame for thread */
	stack_init = (struct __esf *)Z_STACK_PTR_ALIGN(
		Z_STACK_PTR_TO_FRAME(struct __esf, stack_ptr)
		);

	/* Setup the initial stack frame */
	stack_init->a0 = (ulong_t)entry;
	stack_init->a1 = (ulong_t)p1;
	stack_init->a2 = (ulong_t)p2;
	stack_init->a3 = (ulong_t)p3;
	/* EXL set so eret is legal; IE plus currently-enabled IM bits */
	stack_init->status = CP0_STATUS_DEF_RESTORE
		| mips_cp0_status_int_mask;
	stack_init->epc = (ulong_t)z_thread_entry;

	thread->callee_saved.sp = (ulong_t)stack_init;
}

39
arch/mips/include/kernel_arch_data.h

@ -0,0 +1,39 @@
/*
* Copyright (c) 2020 Antony Pavlov <antonynpavlov@gmail.com>
*
* based on arch/riscv/include/kernel_arch_data.h
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Private kernel definitions
*
* This file contains private kernel structures definitions and various
* other definitions for the MIPS processor architecture.
*/
#ifndef ZEPHYR_ARCH_MIPS_INCLUDE_KERNEL_ARCH_DATA_H_
#define ZEPHYR_ARCH_MIPS_INCLUDE_KERNEL_ARCH_DATA_H_
#include <toolchain.h>
#include <arch/cpu.h>
#ifndef _ASMLANGUAGE
#include <kernel.h>
#include <zephyr/types.h>
#include <sys/util.h>
#include <sys/dlist.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_MIPS_INCLUDE_KERNEL_ARCH_DATA_H_ */

55
arch/mips/include/kernel_arch_func.h

@ -0,0 +1,55 @@
/*
* Copyright (c) 2020 Antony Pavlov <antonynpavlov@gmail.com>
*
* based on arch/riscv/include/kernel_arch_func.h
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Private kernel definitions
*
* This file contains private kernel function/macro definitions and various
* other definitions for the MIPS processor architecture.
*/
#ifndef ZEPHYR_ARCH_MIPS_INCLUDE_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_MIPS_INCLUDE_KERNEL_ARCH_FUNC_H_
#include <kernel_arch_data.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE
/*
 * Arch-specific kernel init hook: nothing to do on MIPS -- vectors and
 * interrupt state are already set up by _PrepC()/interrupt_init().
 */
static ALWAYS_INLINE void arch_kernel_init(void)
{
}
/*
 * Record the value arch_swap() will return for @thread once it is
 * switched back in (read by the context-switch path in isr.S).
 */
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{
	thread->arch.swap_return_value = value;
}
FUNC_NORETURN void z_mips_fatal_error(unsigned int reason,
const z_arch_esf_t *esf);
/*
 * True while executing in interrupt context; the nesting counter is
 * maintained by z_mips_enter_irq().
 */
static inline bool arch_is_in_isr(void)
{
	return _current_cpu->nested != 0U;
}
#ifdef CONFIG_IRQ_OFFLOAD
void z_irq_do_offload(void);
#endif
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_ARCH_MIPS_INCLUDE_KERNEL_ARCH_FUNC_H_ */

51
arch/mips/include/mips/mipsregs.h

@ -0,0 +1,51 @@
/*
* Copyright (c) 2021 Antony Pavlov <antonynpavlov@gmail.com>
*
* Macros for MIPS CP0 registers manipulations
* inspired by linux/arch/mips/include/asm/mipsregs.h
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _ZEPHYR_ARCH_MIPS_INCLUDE_MIPS_MIPSREGS_H_
#define _ZEPHYR_ARCH_MIPS_INCLUDE_MIPS_MIPSREGS_H_
#define CP0_BADVADDR $8
#define CP0_STATUS $12
#define CP0_CAUSE $13
#define CP0_EPC $14
/* CP0_STATUS bits */
#define ST0_IE 0x00000001
#define ST0_EXL 0x00000002
#define ST0_ERL 0x00000004
#define ST0_IP0 0x00000100
#define ST0_BEV 0x00400000
/* CP0_CAUSE bits */
#define CAUSE_EXP_MASK 0x0000007c
#define CAUSE_EXP_SHIFT 2
#define CAUSE_IP_MASK 0x0000ff00
#define CAUSE_IP_SHIFT 8
/* Read a 32-bit CP0 register: expands to a single mfc0. */
#define _mips_read_32bit_c0_register(reg) \
({ \
	uint32_t val; \
	__asm__ __volatile__("mfc0\t%0, " STRINGIFY(reg) "\n" \
		: "=r" (val)); \
	val; \
})

/*
 * Write a 32-bit CP0 register via mtc0. The "Jr" constraint lets the
 * compiler use $zero for a constant 0 instead of burning a register.
 */
#define _mips_write_32bit_c0_register(reg, val) \
({ \
	__asm__ __volatile__("mtc0 %z0, " STRINGIFY(reg) "\n" \
		: \
		: "Jr" ((uint32_t)(val))); \
})

/* Named helpers for the registers this port actually touches. */
#define read_c0_status() _mips_read_32bit_c0_register(CP0_STATUS)
#define write_c0_status(val) _mips_write_32bit_c0_register(CP0_STATUS, val)

#define read_c0_cause() _mips_read_32bit_c0_register(CP0_CAUSE)
#endif /* _ZEPHYR_ARCH_MIPS_INCLUDE_MIPS_MIPSREGS_H_ */

72
arch/mips/include/mips/regdef.h

@ -0,0 +1,72 @@
/*
* Copyright (c) 2021 Antony Pavlov <antonynpavlov@gmail.com>
*
* Register names for o32 ABI, see [1] for details.
*
* [1] See MIPS Run (The Morgan Kaufmann Series in Computer
* Architecture and Design) 2nd Edition by Dominic Sweetman
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_ARCH_MIPS_INCLUDE_MIPS_REGDEF_H_
#define ZEPHYR_ARCH_MIPS_INCLUDE_MIPS_REGDEF_H_
/* always 0 */
#define zero $0
/* assembly temporary */
#define AT $1
/* subroutine return values */
#define v0 $2
#define v1 $3
/* arguments */
#define a0 $4
#define a1 $5
#define a2 $6
#define a3 $7
/* temporaries */
#define t0 $8
#define t1 $9
#define t2 $10
#define t3 $11
#define t4 $12
#define t5 $13
#define t6 $14
#define t7 $15
/* subroutine register variables */
#define s0 $16
#define s1 $17
#define s2 $18
#define s3 $19
#define s4 $20
#define s5 $21
#define s6 $22
#define s7 $23
/* temporaries */
#define t8 $24
#define t9 $25
/* interrupt/trap handler scratch registers */
#define k0 $26
#define k1 $27
/* global pointer */
#define gp $28
/* stack pointer */
#define sp $29
/* frame pointer / ninth subroutine register variable */
#define fp $30
#define s8 $30
/* return address */
#define ra $31
#endif /* ZEPHYR_ARCH_MIPS_INCLUDE_MIPS_REGDEF_H_ */

47
arch/mips/include/offsets_short_arch.h

@ -0,0 +1,47 @@
/*
* Copyright (c) 2021 Antony Pavlov <antonynpavlov@gmail.com>
*
* based on arch/riscv/include/offsets_short_arch.h
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_ARCH_MIPS_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_MIPS_INCLUDE_OFFSETS_SHORT_ARCH_H_
#include <offsets.h>
#define _thread_offset_to_sp \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_sp_OFFSET)
#define _thread_offset_to_s0 \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s0_OFFSET)
#define _thread_offset_to_s1 \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s1_OFFSET)
#define _thread_offset_to_s2 \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s2_OFFSET)
#define _thread_offset_to_s3 \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s3_OFFSET)
#define _thread_offset_to_s4 \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s4_OFFSET)
#define _thread_offset_to_s5 \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s5_OFFSET)
#define _thread_offset_to_s6 \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s6_OFFSET)
#define _thread_offset_to_s7 \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s7_OFFSET)
#define _thread_offset_to_s8 \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_s8_OFFSET)
#define _thread_offset_to_swap_return_value \
(___thread_t_arch_OFFSET + ___thread_arch_t_swap_return_value_OFFSET)
#endif /* ZEPHYR_ARCH_MIPS_INCLUDE_OFFSETS_SHORT_ARCH_H_ */

2
include/arch/cpu.h

@ -25,6 +25,8 @@
#include <arch/riscv/arch.h> #include <arch/riscv/arch.h>
#elif defined(CONFIG_XTENSA) #elif defined(CONFIG_XTENSA)
#include <arch/xtensa/arch.h> #include <arch/xtensa/arch.h>
#elif defined(CONFIG_MIPS)
#include <arch/mips/arch.h>
#elif defined(CONFIG_ARCH_POSIX) #elif defined(CONFIG_ARCH_POSIX)
#include <arch/posix/arch.h> #include <arch/posix/arch.h>
#elif defined(CONFIG_SPARC) #elif defined(CONFIG_SPARC)

117
include/arch/mips/arch.h

@ -0,0 +1,117 @@
/*
* Copyright (c) 2020 Antony Pavlov <antonynpavlov@gmail.com>
*
* based on include/arch/sparc/arch.h
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_ARCH_MIPS_ARCH_H_
#define ZEPHYR_INCLUDE_ARCH_MIPS_ARCH_H_
#include <arch/mips/thread.h>
#include <arch/mips/exp.h>
#include <arch/common/sys_bitops.h>
#include <arch/common/sys_io.h>
#include <arch/common/ffs.h>
#include <irq.h>
#include <sw_isr_table.h>
#include <devicetree.h>
#include <mips/mipsregs.h>
#define ARCH_STACK_PTR_ALIGN 16
#define OP_LOADREG lw
#define OP_STOREREG sw
#define CP0_STATUS_DEF_RESTORE (ST0_EXL | ST0_IE)
#ifndef _ASMLANGUAGE
#include <sys/util.h>
#ifdef __cplusplus
extern "C" {
#endif
#define STACK_ROUND_UP(x) ROUND_UP(x, ARCH_STACK_PTR_ALIGN)
void arch_irq_enable(unsigned int irq);
void arch_irq_disable(unsigned int irq);
int arch_irq_is_enabled(unsigned int irq);
void z_irq_spurious(const void *unused);
/**
* Configure a static interrupt.
*
* All arguments must be computable by the compiler at build time.
*
* @param irq_p IRQ line number
* @param priority_p Interrupt priority
* @param isr_p Interrupt service routine
* @param isr_param_p ISR parameter
* @param flags_p IRQ options
*
* @return The vector assigned to this interrupt
*/
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
}
/*
 * Globally lock interrupts by clearing Status.IE.
 *
 * @return key for arch_irq_unlock(): 1 if interrupts were enabled at
 *         the time of the call, 0 if they were already disabled
 */
static ALWAYS_INLINE unsigned int arch_irq_lock(void)
{
	const uint32_t status = read_c0_status();

	if ((status & ST0_IE) == 0) {
		/* Already locked: nothing to clear */
		return 0;
	}

	write_c0_status(status & ~ST0_IE);
	return 1;
}
/*
 * Restore the interrupt lock state captured by arch_irq_lock().
 *
 * @param key non-zero re-enables Status.IE; zero keeps it cleared
 */
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
	const uint32_t status = read_c0_status();

	if (key != 0) {
		write_c0_status(status | ST0_IE);
	} else {
		write_c0_status(status & ~ST0_IE);
	}
}
/*
 * A key is "unlocked" when interrupts were enabled at lock time
 * (arch_irq_lock() returned 1).
 */
static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
	return key != 0;
}
/* Single architectural no-op instruction. */
static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile ("nop");
}
extern uint32_t sys_clock_cycle_get_32(void);

/* 32-bit hardware cycle counter, provided by the system timer driver. */
static inline uint32_t arch_k_cycle_get_32(void)
{
	return sys_clock_cycle_get_32();
}
extern uint64_t sys_clock_cycle_get_64(void);

/* 64-bit hardware cycle counter, provided by the system timer driver. */
static inline uint64_t arch_k_cycle_get_64(void)
{
	return sys_clock_cycle_get_64();
}
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_MIPS_ARCH_H_ */

61
include/arch/mips/exp.h

@ -0,0 +1,61 @@
/*
* Copyright (c) 2021 Antony Pavlov <antonynpavlov@gmail.com>
*
* based on include/arch/riscv/exp.h
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_ARCH_MIPS_EXP_H_
#define ZEPHYR_INCLUDE_ARCH_MIPS_EXP_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <toolchain.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Exception stack frame: the caller-saved register state pushed by the
 * common exception entry (STORE_CALLER_SAVED in isr.S) plus the CP0
 * registers needed to resume the interrupted context.
 *
 * Field layout must stay in sync with the ESF_O() offsets generated
 * from arch/mips/core/offsets/offsets.c.
 */
struct __esf {
	ulong_t ra; /* return address */
	ulong_t gp; /* global pointer */
	ulong_t t0; /* Caller-saved temporary register */
	ulong_t t1; /* Caller-saved temporary register */
	ulong_t t2; /* Caller-saved temporary register */
	ulong_t t3; /* Caller-saved temporary register */
	ulong_t t4; /* Caller-saved temporary register */
	ulong_t t5; /* Caller-saved temporary register */
	ulong_t t6; /* Caller-saved temporary register */
	ulong_t t7; /* Caller-saved temporary register */
	ulong_t t8; /* Caller-saved temporary register */
	ulong_t t9; /* Caller-saved temporary register */
	ulong_t a0; /* function argument */
	ulong_t a1; /* function argument */
	ulong_t a2; /* function argument */
	ulong_t a3; /* function argument */
	ulong_t v0; /* return value */
	ulong_t v1; /* return value */
	ulong_t at; /* assembly temporary */
	ulong_t epc;      /* exception return address */
	ulong_t badvaddr; /* faulting virtual address, if any */
	ulong_t hi;       /* multiply/divide result (high) */
	ulong_t lo;       /* multiply/divide result (low) */
	ulong_t status;   /* CP0 Status at exception time */
	ulong_t cause;    /* CP0 Cause at exception time */
};

typedef struct __esf z_arch_esf_t;
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_MIPS_EXP_H_ */

209
include/arch/mips/linker.ld

@ -0,0 +1,209 @@
/*
* Copyright (c) 2020, 2021 Antony Pavlov <antonynpavlov@gmail.com>
*
* based on include/arch/sparc/linker.ld
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Linker command/script file for the MIPS platform
*/
#include <autoconf.h>
#include <linker/sections.h>
#include <linker/linker-defs.h>
#include <linker/linker-tool.h>
#define ROMABLE_REGION RAM
#define RAMABLE_REGION RAM
#define _VECTOR_SECTION_NAME vector
#define _EXCEPTION_SECTION_NAME exceptions
#define _RESET_SECTION_NAME reset
MEMORY
{
	/* All code and data live in a single RAM region on this platform. */
	RAM (rwx) : ORIGIN = CONFIG_SRAM_BASE_ADDRESS, LENGTH = KB(CONFIG_SRAM_SIZE)

	/* Used by and documented in include/linker/intlist.ld */
	IDT_LIST (wx) : ORIGIN = 0xFFFFF7FF, LENGTH = 2K
}

/* Every standard region aliases the one RAM region. */
REGION_ALIAS("REGION_TEXT", RAM);
REGION_ALIAS("REGION_RODATA", RAM);
REGION_ALIAS("REGION_DATA_VMA", RAM);
REGION_ALIAS("REGION_DATA_LMA", RAM);
REGION_ALIAS("REGION_BSS", RAM);

ENTRY(CONFIG_KERNEL_ENTRY)

/* Memory-layout symbols consumed by early boot code. */
PROVIDE (__memory_base = CONFIG_SRAM_BASE_ADDRESS);
PROVIDE (__memory_size = CONFIG_SRAM_SIZE * 1024);
/* Initial boot stack pointer: 1 KiB below the top of RAM. */
PROVIDE (__stack = CONFIG_SRAM_BASE_ADDRESS + (CONFIG_SRAM_SIZE - 1) * 1024);
SECTIONS
{
#include <linker/rel-sections.ld>

	/* Exception vectors first; 4 KiB alignment per MIPS vector base
	 * requirements -- TODO confirm against the target SoC.
	 */
	SECTION_PROLOGUE(_VECTOR_SECTION_NAME,,)
	{
		. = ALIGN(0x1000);
		KEEP(*(.vectors.*))
	} GROUP_LINK_IN(ROMABLE_REGION)

	/* Reset entry code (see arch/mips/core/reset.S). */
	SECTION_PROLOGUE(_RESET_SECTION_NAME,,)
	{
		. = ALIGN(0x10);
		KEEP(*(.reset.*))
	} GROUP_LINK_IN(ROMABLE_REGION)

	/* Exception entry stubs; KEEP prevents garbage collection of
	 * handlers that are only reached via hardware dispatch.
	 */
	SECTION_PROLOGUE(_EXCEPTION_SECTION_NAME,,)
	{
		. = ALIGN(0x10);
		KEEP(*(".exception.entry.*"))
		*(".exception.other.*")
	} GROUP_LINK_IN(ROMABLE_REGION)

	SECTION_PROLOGUE(_TEXT_SECTION_NAME,,)
	{
		. = ALIGN(4);
		*(.text)
		*(".text.*")
	} GROUP_LINK_IN(REGION_TEXT)

	/* Bounds used by code that checks whether a pointer is in rodata
	 * (e.g. cbprintf packaging, log core).
	 */
	__rodata_region_start = .;

#include <linker/common-rom.ld>
#include <linker/thread-local-storage.ld>

	SECTION_PROLOGUE(_RODATA_SECTION_NAME,,)
	{
		. = ALIGN(8);
		*(.rodata)
		*(.rodata.*)
		*(.gnu.linkonce.r.*)
		*(.rodata1)

/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-rodata.ld>
	} GROUP_LINK_IN(REGION_RODATA)

#include <linker/cplusplus-rom.ld>

	__rodata_region_end = .;

	SECTION_PROLOGUE(.plt,,)
	{
		*(.plt)
	}

	SECTION_PROLOGUE(.iplt,,)
	{
		*(.iplt)
	}

	SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,)
	{
		. = ALIGN(8);
		_image_ram_start = .;
		__data_ram_start = .;
		*(.data)
		*(.data.*)
		*(.gnu.linkonce.d.*)
		*(.sdata)
		*(.sdata.*)
		. = ALIGN(8);
		SORT(CONSTRUCTORS)
/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-rwdata.ld>
	} GROUP_DATA_LINK_IN(REGION_DATA_VMA, REGION_DATA_LMA)

#include <linker/common-ram.ld>

/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-ram-sections.ld>

/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-data-sections.ld>

	__data_ram_end = .;

	SECTION_DATA_PROLOGUE(_BSS_SECTION_NAME,(NOLOAD),)
	{
		/*
		 * For performance, BSS section is assumed to be 4 byte aligned and
		 * a multiple of 4 bytes
		 */
		. = ALIGN(4);
		__bss_start = .;
		*(.dynbss)
		*(.sbss)
		*(.sbss.*)
		*(.bss)
		*(.bss.*)
		*(.gnu.linkonce.b.*)
		*(.scommon)
		COMMON_SYMBOLS
		/*
		 * As memory is cleared in words only, it is simpler to ensure the BSS
		 * section ends on a 4 byte boundary. This wastes a maximum of 3 bytes.
		 */
		__bss_end = ALIGN(4);
	} GROUP_LINK_IN(REGION_BSS)

	SECTION_PROLOGUE(_NOINIT_SECTION_NAME,(NOLOAD),)
	{
		/*
		 * This section is used for non-initialized objects that
		 * will not be cleared during the boot process.
		 */
		*(.noinit)
		*(.noinit.*)
/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-noinit.ld>
	} GROUP_LINK_IN(REGION_BSS)

#include <linker/cplusplus-ram.ld>

	_image_ram_end = .;
	_end = .; /* end of image */

/* Located in generated directory. This file is populated by the
 * zephyr_linker_sources() Cmake function.
 */
#include <snippets-sections.ld>

#include <linker/debug-sections.ld>

	/* MIPS-specific debug metadata retained for debuggers. */
	.mdebug.abi32 : {
		KEEP(*(.mdebug.abi32))
	}

	SECTION_PROLOGUE(.gnu.attributes, 0,)
	{
		KEEP(*(.gnu.attributes))
	}

	/* Toolchain-generated MIPS metadata not needed in the image. */
	/DISCARD/ : {
		*(.MIPS.abiflags)
		*(.pdr)
		*(.reginfo)
	}
}

54
include/arch/mips/thread.h

@ -0,0 +1,54 @@
/*
* Copyright (c) 2020 Antony Pavlov <antonynpavlov@gmail.com>
*
* based on include/arch/riscv/thread.h
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Per-arch thread definition
*
* This file contains definitions for
*
* struct _thread_arch
* struct _callee_saved
*
* necessary to instantiate instances of struct k_thread.
*/
#ifndef ZEPHYR_INCLUDE_ARCH_MIPS_THREAD_H_
#define ZEPHYR_INCLUDE_ARCH_MIPS_THREAD_H_
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
/*
* The following structure defines the list of registers that need to be
* saved/restored when a cooperative context switch occurs.
*/
/*
 * Callee-saved MIPS register context stored on a cooperative context
 * switch.
 *
 * NOTE(review): the field order presumably matches the offsets used by
 * the context-switch assembly (arch/mips/core/swap.S) -- do not reorder
 * without updating that code.
 */
struct _callee_saved {
	ulong_t sp;	/* Stack pointer */
	ulong_t s0;	/* saved register */
	ulong_t s1;	/* saved register */
	ulong_t s2;	/* saved register */
	ulong_t s3;	/* saved register */
	ulong_t s4;	/* saved register */
	ulong_t s5;	/* saved register */
	ulong_t s6;	/* saved register */
	ulong_t s7;	/* saved register */
	ulong_t s8;	/* saved register AKA fp */
};

typedef struct _callee_saved _callee_saved_t;
/* Per-thread architecture-specific state. */
struct _thread_arch {
	uint32_t swap_return_value; /* Return value of z_swap() */
};

typedef struct _thread_arch _thread_arch_t;
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_MIPS_THREAD_H_ */

2
include/linker/linker-tool-gcc.h

@ -51,6 +51,8 @@
#endif #endif
#elif defined(CONFIG_XTENSA) #elif defined(CONFIG_XTENSA)
/* Not needed */ /* Not needed */
#elif defined(CONFIG_MIPS)
OUTPUT_ARCH("mips")
#elif defined(CONFIG_ARCH_POSIX) #elif defined(CONFIG_ARCH_POSIX)
/* Not needed */ /* Not needed */
#elif defined(CONFIG_SPARC) #elif defined(CONFIG_SPARC)

2
include/toolchain/common.h

@ -82,7 +82,7 @@
#define PERFOPT_ALIGN .align 4 #define PERFOPT_ALIGN .align 4
#elif defined(CONFIG_NIOS2) || defined(CONFIG_RISCV) || \ #elif defined(CONFIG_NIOS2) || defined(CONFIG_RISCV) || \
defined(CONFIG_XTENSA) defined(CONFIG_XTENSA) || defined(CONFIG_MIPS)
#define PERFOPT_ALIGN .balign 4 #define PERFOPT_ALIGN .balign 4
#elif defined(CONFIG_ARCH_POSIX) #elif defined(CONFIG_ARCH_POSIX)

6
include/toolchain/gcc.h

@ -295,7 +295,8 @@ do { \
#if defined(_ASMLANGUAGE) #if defined(_ASMLANGUAGE)
#if defined(CONFIG_ARM) || defined(CONFIG_NIOS2) || defined(CONFIG_RISCV) \ #if defined(CONFIG_ARM) || defined(CONFIG_NIOS2) || defined(CONFIG_RISCV) \
|| defined(CONFIG_XTENSA) || defined(CONFIG_ARM64) || defined(CONFIG_XTENSA) || defined(CONFIG_ARM64) \
|| defined(CONFIG_MIPS)
#define GTEXT(sym) .global sym; .type sym, %function #define GTEXT(sym) .global sym; .type sym, %function
#define GDATA(sym) .global sym; .type sym, %object #define GDATA(sym) .global sym; .type sym, %object
#define WTEXT(sym) .weak sym; .type sym, %function #define WTEXT(sym) .weak sym; .type sym, %function
@ -476,7 +477,8 @@ do { \
"\n\t.equ\t" #name "," #value \ "\n\t.equ\t" #name "," #value \
"\n\t.type\t" #name ",@object") "\n\t.type\t" #name ",@object")
#elif defined(CONFIG_NIOS2) || defined(CONFIG_RISCV) || defined(CONFIG_XTENSA) #elif defined(CONFIG_NIOS2) || defined(CONFIG_RISCV) || \
defined(CONFIG_XTENSA) || defined(CONFIG_MIPS)
/* No special prefixes necessary for constants in this arch AFAICT */ /* No special prefixes necessary for constants in this arch AFAICT */
#define GEN_ABSOLUTE_SYM(name, value) \ #define GEN_ABSOLUTE_SYM(name, value) \

2
lib/libc/minimal/include/sys/types.h

@ -47,6 +47,8 @@ typedef int off_t;
typedef int off_t; typedef int off_t;
#elif defined(__sparc__) #elif defined(__sparc__)
typedef int off_t; typedef int off_t;
#elif defined(__mips)
typedef int off_t;
#else #else
#error "The minimal libc library does not recognize the architecture!\n" #error "The minimal libc library does not recognize the architecture!\n"
#endif #endif

2
lib/os/cbprintf_packaged.c

@ -31,7 +31,7 @@ static inline bool ptr_in_rodata(const char *addr)
#define RO_END 0 #define RO_END 0
#elif defined(CONFIG_ARC) || defined(CONFIG_ARM) || defined(CONFIG_X86) \ #elif defined(CONFIG_ARC) || defined(CONFIG_ARM) || defined(CONFIG_X86) \
|| defined(CONFIG_RISCV) || defined(CONFIG_ARM64) \ || defined(CONFIG_RISCV) || defined(CONFIG_ARM64) \
|| defined(CONFIG_NIOS2) || defined(CONFIG_NIOS2) || defined(CONFIG_MIPS)
extern char __rodata_region_start[]; extern char __rodata_region_start[];
extern char __rodata_region_end[]; extern char __rodata_region_end[];
#define RO_START __rodata_region_start #define RO_START __rodata_region_start

3
scripts/logging/dictionary/dictionary_parser/log_database.py

@ -25,6 +25,9 @@ ARCHS = {
"arm64" : { "arm64" : {
"kconfig": "CONFIG_ARM64", "kconfig": "CONFIG_ARM64",
}, },
"mips" : {
"kconfig": "CONFIG_MIPS",
},
"sparc" : { "sparc" : {
"kconfig": "CONFIG_SPARC", "kconfig": "CONFIG_SPARC",
}, },

3
subsys/debug/thread_info.c

@ -68,6 +68,9 @@ size_t _kernel_thread_info_offsets[] = {
[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread, [THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
callee_saved.esp), callee_saved.esp),
#endif #endif
#elif defined(CONFIG_MIPS)
[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
callee_saved.sp),
#elif defined(CONFIG_NIOS2) #elif defined(CONFIG_NIOS2)
[THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread, [THREAD_INFO_OFFSET_T_STACK_PTR] = offsetof(struct k_thread,
callee_saved.sp), callee_saved.sp),

2
subsys/logging/log_core.c

@ -162,7 +162,7 @@ static bool is_rodata(const void *addr)
{ {
#if defined(CONFIG_ARM) || defined(CONFIG_ARC) || defined(CONFIG_X86) || \ #if defined(CONFIG_ARM) || defined(CONFIG_ARC) || defined(CONFIG_X86) || \
defined(CONFIG_ARM64) || defined(CONFIG_NIOS2) || \ defined(CONFIG_ARM64) || defined(CONFIG_NIOS2) || \
defined(CONFIG_RISCV) || defined(CONFIG_SPARC) defined(CONFIG_RISCV) || defined(CONFIG_SPARC) || defined(CONFIG_MIPS)
extern const char *__rodata_region_start[]; extern const char *__rodata_region_start[];
extern const char *__rodata_region_end[]; extern const char *__rodata_region_end[];
#define RO_START __rodata_region_start #define RO_START __rodata_region_start

8
subsys/testsuite/include/interrupt_util.h

@ -159,6 +159,14 @@ static inline void trigger_irq(int irq)
z_sparc_enter_irq(irq); z_sparc_enter_irq(irq);
} }
#elif defined(CONFIG_MIPS)
extern void z_mips_enter_irq(int);
static inline void trigger_irq(int irq)
{
z_mips_enter_irq(irq);
}
#else #else
/* So far, Nios II does not support this */ /* So far, Nios II does not support this */
#define NO_TRIGGER_FROM_SW #define NO_TRIGGER_FROM_SW

Loading…
Cancel
Save