
kernel: move z_init_static_threads to where it is being used

Move it out of thread.c and put it directly in init.c, where it is
used. Also remove the declaration from kernel.h; this is an internal
function and should not be in a public header.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Author: Anas Nashif
Branch: pull/69889/head
Commit: 3ca50f5060
Files changed:
  1. include/zephyr/kernel.h (15 lines)
  2. kernel/include/kthread.h (30 lines)
  3. kernel/init.c (53 lines)
  4. kernel/thread.c (78 lines)
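
For context: z_init_static_threads() is the boot-time routine that sets
up and starts threads declared at compile time. A minimal sketch of such
a declaration, using the public K_THREAD_DEFINE API (my_thread and
my_entry are hypothetical names, not part of this commit):

    #include <zephyr/kernel.h>

    static void my_entry(void *p1, void *p2, void *p3)
    {
            ARG_UNUSED(p1); ARG_UNUSED(p2); ARG_UNUSED(p3);
            printk("static thread running\n");
    }

    /* Placed in the _static_thread_data section that
     * z_init_static_threads() iterates at boot; the final 0 is the
     * start delay in milliseconds, i.e. start immediately. */
    K_THREAD_DEFINE(my_thread, 1024, my_entry, NULL, NULL, NULL, 5, 0, 0);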

include/zephyr/kernel.h

@@ -5935,21 +5935,6 @@ static inline void k_cpu_atomic_idle(unsigned int key)
  * private APIs that are utilized by one or more public APIs
  */
-/**
- * @internal
- */
-#ifdef CONFIG_MULTITHREADING
-/**
- * @internal
- */
-void z_init_static_threads(void);
-#else
-/**
- * @internal
- */
-#define z_init_static_threads() do { } while (false)
-#endif
-
 /**
  * @internal
  */

kernel/include/kthread.h

@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016-2017 Wind River Systems, Inc.
+ * Copyright (c) 2024 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef ZEPHYR_KERNEL_INCLUDE_THREAD_H_
+#define ZEPHYR_KERNEL_INCLUDE_THREAD_H_
+
+#include <zephyr/kernel.h>
+#include <timeout_q.h>
+
+#ifdef CONFIG_MULTITHREADING
+static inline void thread_schedule_new(struct k_thread *thread, k_timeout_t delay)
+{
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
+		k_thread_start(thread);
+	} else {
+		z_add_thread_timeout(thread, delay);
+	}
+#else
+	ARG_UNUSED(delay);
+	k_thread_start(thread);
+#endif
+}
+#endif
+
+#endif /* ZEPHYR_KERNEL_INCLUDE_THREAD_H_ */
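
thread_schedule_new() is the helper a delayed k_thread_create() reaches
once the thread is set up. A hedged sketch of the caller-visible
behavior, using only public APIs (my_stack, my_thread, my_entry and
start_worker are hypothetical names):

    #include <zephyr/kernel.h>

    static void my_entry(void *p1, void *p2, void *p3)
    {
            ARG_UNUSED(p1); ARG_UNUSED(p2); ARG_UNUSED(p3);
            printk("worker running\n");
    }

    K_THREAD_STACK_DEFINE(my_stack, 1024);
    static struct k_thread my_thread;

    void start_worker(void)
    {
            /* With CONFIG_SYS_CLOCK_EXISTS, the 100 ms delay takes the
             * z_add_thread_timeout() branch of thread_schedule_new();
             * K_NO_WAIT would call k_thread_start() directly. */
            k_thread_create(&my_thread, my_stack,
                            K_THREAD_STACK_SIZEOF(my_stack),
                            my_entry, NULL, NULL, NULL,
                            5, 0, K_MSEC(100));
    }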

kernel/init.c

@@ -23,6 +23,7 @@
 #include <zephyr/init.h>
 #include <zephyr/linker/linker-defs.h>
 #include <ksched.h>
+#include <kthread.h>
 #include <string.h>
 #include <zephyr/sys/dlist.h>
 #include <kernel_internal.h>
@@ -58,6 +59,57 @@ struct k_thread z_idle_threads[CONFIG_MP_MAX_NUM_CPUS];
 static K_KERNEL_PINNED_STACK_ARRAY_DEFINE(z_idle_stacks,
 					  CONFIG_MP_MAX_NUM_CPUS,
 					  CONFIG_IDLE_STACK_SIZE);
+
+static void z_init_static_threads(void)
+{
+	STRUCT_SECTION_FOREACH(_static_thread_data, thread_data) {
+		z_setup_new_thread(
+			thread_data->init_thread,
+			thread_data->init_stack,
+			thread_data->init_stack_size,
+			thread_data->init_entry,
+			thread_data->init_p1,
+			thread_data->init_p2,
+			thread_data->init_p3,
+			thread_data->init_prio,
+			thread_data->init_options,
+			thread_data->init_name);
+
+		thread_data->init_thread->init_data = thread_data;
+	}
+
+#ifdef CONFIG_USERSPACE
+	STRUCT_SECTION_FOREACH(k_object_assignment, pos) {
+		for (int i = 0; pos->objects[i] != NULL; i++) {
+			k_object_access_grant(pos->objects[i],
+					      pos->thread);
+		}
+	}
+#endif
+
+	/*
+	 * Non-legacy static threads may be started immediately or
+	 * after a previously specified delay. Even though the
+	 * scheduler is locked, ticks can still be delivered and
+	 * processed. Take a sched lock to prevent them from running
+	 * until they are all started.
+	 *
+	 * Note that static threads defined using the legacy API have a
+	 * delay of K_FOREVER.
+	 */
+	k_sched_lock();
+	STRUCT_SECTION_FOREACH(_static_thread_data, thread_data) {
+		k_timeout_t init_delay = Z_THREAD_INIT_DELAY(thread_data);
+
+		if (!K_TIMEOUT_EQ(init_delay, K_FOREVER)) {
+			thread_schedule_new(thread_data->init_thread,
+					    init_delay);
+		}
+	}
+	k_sched_unlock();
+}
+#else
+#define z_init_static_threads() do { } while (false)
+#endif /* CONFIG_MULTITHREADING */
+
 extern const struct init_entry __init_start[];
@@ -312,6 +364,7 @@ static void z_sys_init_run_level(enum init_level level)
 extern void boot_banner(void);
 
 /**
  * @brief Mainline for kernel's background thread
  *

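The CONFIG_USERSPACE loop in the block above walks the
k_object_assignment section; entries in that section come from the
public K_THREAD_ACCESS_GRANT macro. A sketch, assuming
CONFIG_USERSPACE=y (worker, worker_entry and work_sem are hypothetical
names):

    #include <zephyr/kernel.h>

    K_SEM_DEFINE(work_sem, 0, 1);

    static void worker_entry(void *p1, void *p2, void *p3)
    {
            ARG_UNUSED(p1); ARG_UNUSED(p2); ARG_UNUSED(p3);
            k_sem_take(&work_sem, K_FOREVER);
    }

    /* K_USER: the thread runs in user mode, so it needs an explicit
     * grant before it may touch the semaphore. */
    K_THREAD_DEFINE(worker, 1024, worker_entry, NULL, NULL, NULL, 5,
                    K_USER, 0);

    /* Emits a k_object_assignment entry; at boot the loop above turns
     * it into k_object_access_grant(&work_sem, worker). */
    K_THREAD_ACCESS_GRANT(worker, &work_sem);
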
kernel/thread.c

@@ -16,6 +16,7 @@
 #include <zephyr/sys/math_extras.h>
 #include <zephyr/sys_clock.h>
 #include <ksched.h>
+#include <kthread.h>
 #include <wait_q.h>
 #include <zephyr/internal/syscall_handler.h>
 #include <kernel_internal.h>
@@ -432,21 +433,6 @@ static inline void z_vrfy_k_thread_start(struct k_thread *thread)
 #endif
 #endif
 
-#ifdef CONFIG_MULTITHREADING
-static void schedule_new_thread(struct k_thread *thread, k_timeout_t delay)
-{
-#ifdef CONFIG_SYS_CLOCK_EXISTS
-	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
-		k_thread_start(thread);
-	} else {
-		z_add_thread_timeout(thread, delay);
-	}
-#else
-	ARG_UNUSED(delay);
-	k_thread_start(thread);
-#endif
-}
-#endif
-
 #if CONFIG_STACK_POINTER_RANDOM
 int z_stack_adjust_initialized;
@@ -717,7 +703,7 @@ k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
 			   prio, options, NULL);
 
 	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
-		schedule_new_thread(new_thread, delay);
+		thread_schedule_new(new_thread, delay);
 	}
 
 	return new_thread;
@@ -789,7 +775,7 @@ k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
 			   entry, p1, p2, p3, prio, options, NULL);
 
 	if (!K_TIMEOUT_EQ(delay, K_FOREVER)) {
-		schedule_new_thread(new_thread, delay);
+		thread_schedule_new(new_thread, delay);
 	}
 
 	return new_thread;
@@ -798,64 +784,6 @@ k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
 #endif /* CONFIG_USERSPACE */
 #endif /* CONFIG_MULTITHREADING */
 
-#ifdef CONFIG_MULTITHREADING
-#ifdef CONFIG_USERSPACE
-static void grant_static_access(void)
-{
-	STRUCT_SECTION_FOREACH(k_object_assignment, pos) {
-		for (int i = 0; pos->objects[i] != NULL; i++) {
-			k_object_access_grant(pos->objects[i],
-					      pos->thread);
-		}
-	}
-}
-#endif /* CONFIG_USERSPACE */
-
-void z_init_static_threads(void)
-{
-	_FOREACH_STATIC_THREAD(thread_data) {
-		z_setup_new_thread(
-			thread_data->init_thread,
-			thread_data->init_stack,
-			thread_data->init_stack_size,
-			thread_data->init_entry,
-			thread_data->init_p1,
-			thread_data->init_p2,
-			thread_data->init_p3,
-			thread_data->init_prio,
-			thread_data->init_options,
-			thread_data->init_name);
-
-		thread_data->init_thread->init_data = thread_data;
-	}
-
-#ifdef CONFIG_USERSPACE
-	grant_static_access();
-#endif
-
-	/*
-	 * Non-legacy static threads may be started immediately or
-	 * after a previously specified delay. Even though the
-	 * scheduler is locked, ticks can still be delivered and
-	 * processed. Take a sched lock to prevent them from running
-	 * until they are all started.
-	 *
-	 * Note that static threads defined using the legacy API have a
-	 * delay of K_FOREVER.
-	 */
-	k_sched_lock();
-	_FOREACH_STATIC_THREAD(thread_data) {
-		k_timeout_t init_delay = Z_THREAD_INIT_DELAY(thread_data);
-
-		if (!K_TIMEOUT_EQ(init_delay, K_FOREVER)) {
-			schedule_new_thread(thread_data->init_thread,
-					    init_delay);
-		}
-	}
-	k_sched_unlock();
-}
-#endif
-
 void z_init_thread_base(struct _thread_base *thread_base, int priority,
 			uint32_t initial_state, unsigned int options)

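As the moved comment notes, static threads defined with a delay of
K_FOREVER are set up but never auto-scheduled. The same applies to
k_thread_create(): a K_FOREVER delay skips thread_schedule_new()
entirely and the thread stays dormant until released by hand. A sketch
(deferred, deferred_stack, deferred_entry and create_then_start are
hypothetical names):

    #include <zephyr/kernel.h>

    static void deferred_entry(void *p1, void *p2, void *p3)
    {
            ARG_UNUSED(p1); ARG_UNUSED(p2); ARG_UNUSED(p3);
            printk("deferred thread running\n");
    }

    K_THREAD_STACK_DEFINE(deferred_stack, 1024);
    static struct k_thread deferred;

    void create_then_start(void)
    {
            k_tid_t tid = k_thread_create(&deferred, deferred_stack,
                                          K_THREAD_STACK_SIZEOF(deferred_stack),
                                          deferred_entry, NULL, NULL, NULL,
                                          5, 0, K_FOREVER);

            /* ... later, when ready ... */
            k_thread_start(tid);
    }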