From 148769c715d5fb15acc0104385010d9b10b44a2b Mon Sep 17 00:00:00 2001 From: Flavio Ceolin Date: Fri, 18 Dec 2020 00:33:29 -0800 Subject: [PATCH] sched: timeout: Do not miss slice timeouts Time slices don't have a timeout struct associated and stored in timeout_list. The time slice timeout is directly programmed in the system clock and tracked in _current_cpu->slice_ticks. There is one issue where the time slice timeout can be missed because the system clock is re-programmed to a longer timeout. For this to happen, it is only necessary that the timeout_list is empty (no timeout set) and a new timeout longer than the remaining time slice is set. This happens because z_add_timeout does not check for the slice ticks. The following example demonstrates the issue: K_THREAD_STACK_DEFINE(tstack, STACK_SIZE); K_THREAD_STACK_ARRAY_DEFINE(tstacks, NUM_THREAD, STACK_SIZE); K_SEM_DEFINE(sema, 0, NUM_THREAD); static inline void spin_for_ms(int ms) { uint32_t t32 = k_uptime_get_32(); while (k_uptime_get_32() - t32 < ms) { } } static void thread_time_slice(void *p1, void *p2, void *p3) { printk("thread[%d] - Before spin\n", (int)(uintptr_t)p1); /* Spinning for longer than slice */ spin_for_ms(SLICE_SIZE + 20); /* The following print should not happen before another * same priority thread starts. 
*/ printk("thread[%d] - After spinning\n", (int)(uintptr_t)p1); k_sem_give(&sema); } void main(void) { k_tid_t tid[NUM_THREAD]; struct k_thread t[NUM_THREAD]; uint32_t slice_ticks = k_ms_to_ticks_ceil32(SLICE_SIZE); int old_prio = k_thread_priority_get(k_current_get()); /* disable timeslice */ k_sched_time_slice_set(0, K_PRIO_PREEMPT(0)); for (int j = 0; j < 2; j++) { k_sem_reset(&sema); /* update priority for current thread */ k_thread_priority_set(k_current_get(), K_PRIO_PREEMPT(j)); /* synchronize to tick boundary */ k_usleep(1); /* create delayed threads with equal preemptive priority */ for (int i = 0; i < NUM_THREAD; i++) { tid[i] = k_thread_create(&t[i], tstacks[i], STACK_SIZE, thread_time_slice, (void *)i, NULL, NULL, K_PRIO_PREEMPT(j), 0, K_NO_WAIT); } /* enable time slice (and reset the counter!) */ k_sched_time_slice_set(SLICE_SIZE, K_PRIO_PREEMPT(0)); /* Spins for a while to spend this thread's time but not longer */ /* than a slice. This is important */ spin_for_ms(100); printk("before sleep\n"); /* relinquish CPU and wait for each thread to complete */ k_sleep(K_TICKS(slice_ticks * (NUM_THREAD + 1))); for (int i = 0; i < NUM_THREAD; i++) { k_sem_take(&sema, K_FOREVER); } /* test case teardown */ for (int i = 0; i < NUM_THREAD; i++) { k_thread_abort(tid[i]); } /* disable time slice */ k_sched_time_slice_set(0, K_PRIO_PREEMPT(0)); } k_thread_priority_set(k_current_get(), old_prio); } Signed-off-by: Flavio Ceolin --- kernel/timeout.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/kernel/timeout.c b/kernel/timeout.c index 80685bcb5cb..e87057169ce 100644 --- a/kernel/timeout.c +++ b/kernel/timeout.c @@ -123,7 +123,23 @@ void z_add_timeout(struct _timeout *to, _timeout_func_t fn, } if (to == first()) { +#if CONFIG_TIMESLICING + /* + * This is not ideal, since it does not + * account for the time elapsed since the + * last announcement, and slice_ticks is based + * on that. 
It means that the time remaining for + the next announcement can be less than + slice_ticks. + */ + int32_t next_time = next_timeout(); + + if (_current_cpu->slice_ticks != next_time) { + z_clock_set_timeout(next_time, false); + } +#else z_clock_set_timeout(next_timeout(), false); +#endif /* CONFIG_TIMESLICING */ } } }