
kernel: Remove legacy preemption checking

The metairq feature exposed the fact that all of our arch code (and a
few mistaken spots in the scheduler too) was trying to interpret
"preemptible" threads independently.

As of the scheduler rewrite, that logic is entirely within sched.c and
doing it externally is redundant.  And now that "cooperative" threads
can be preempted, it's wrong and produces test failures when used with
metairq threads.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
pull/7422/merge
Authored by Andy Ross 7 years ago, committed by Andrew Boie
Commit 3a0cb2d35d
1. arch/arc/core/fast_irq.S (7 changed lines)
2. arch/arc/core/regular_irq.S (10 changed lines)
3. arch/arm/core/exc_exit.S (8 changed lines)
4. arch/nios2/core/exception.S (8 changed lines)
5. arch/riscv32/core/isr.S (8 changed lines)
6. arch/x86/core/intstub.S (8 changed lines)
7. arch/x86/core/irq_manage.c (4 changed lines)
8. arch/xtensa/core/xt_zephyr.S (7 changed lines)
9. kernel/sched.c (13 changed lines)
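
The commit message says the preemption decision now lives entirely in sched.c, and every architecture change below deletes the same pattern: read the thread's preempt field, compare it against a threshold (_NON_PREEMPT_THRESHOLD, or _PREEMPT_THRESHOLD on ARM), and skip rescheduling for "cooperative" threads before consulting _kernel.ready_q.cache. As a minimal, self-contained sketch of that before/after (stand-in struct layouts, a stand-in threshold value, and hypothetical helper names; not code from the tree):

/*
 * Illustrative sketch only -- not Zephyr source.  The struct layouts,
 * threshold value, and helper names are stand-ins; the fields they mimic
 * (_current->base.preempt, _NON_PREEMPT_THRESHOLD, _kernel.ready_q.cache)
 * are the symbols visible in the diffs below.
 */
#include <stdbool.h>
#include <stdint.h>

#define _NON_PREEMPT_THRESHOLD 0x0080U   /* stand-in value */

struct thread_base { uint16_t preempt; };
struct k_thread { struct thread_base base; };

static struct {
	struct { struct k_thread *cache; } ready_q;
} _kernel;

static struct k_thread *_current;

/* Before this commit: each arch interrupt-exit path re-derived
 * "is _current preemptible?" before looking at the scheduler's cache. */
static bool isr_exit_should_swap_old(void)
{
	if (_current->base.preempt >= _NON_PREEMPT_THRESHOLD) {
		return false;   /* cooperative thread: arch code refused to swap */
	}
	return _kernel.ready_q.cache != _current;
}

/* After this commit: the scheduler folds preemptibility (including the
 * metairq cases from the commit message) into ready_q.cache when it runs
 * update_cache(), so the arch layer only compares pointers. */
static bool isr_exit_should_swap_new(void)
{
	return _kernel.ready_q.cache != _current;
}

Only the second form survives in the architecture code below; whether ready_q.cache points at _current is decided centrally in kernel/sched.c (update_cache() appears in the last diff), which is the one place that understands the cooperative and metairq rules.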

arch/arc/core/fast_irq.S (7 changed lines)

@@ -149,13 +149,6 @@ SECTION_FUNC(TEXT, _firq_exit)
 mov_s r1, _kernel
 ld_s r2, [r1, _kernel_offset_to_current]
-/*
- * Non-preemptible thread ? Do not schedule (see explanation of
- * preempt field in kernel_struct.h).
- */
-ldh_s r0, [r2, _thread_offset_to_preempt]
-brhs r0, _NON_PREEMPT_THRESHOLD, _firq_no_reschedule
 /* Check if the current thread (in r2) is the cached thread */
 ld_s r0, [r1, _kernel_offset_to_ready_q_cache]
 brne r0, r2, _firq_reschedule

arch/arc/core/regular_irq.S (10 changed lines)

@@ -109,16 +109,6 @@ SECTION_FUNC(TEXT, _rirq_exit)
 * point on until return from interrupt.
 */
-/*
- * Non-preemptible thread ? Do not schedule (see explanation of
- * preempt field in kernel_struct.h).
- */
-ldh_s r0, [r2, _thread_offset_to_preempt]
-mov r3, _NON_PREEMPT_THRESHOLD
-cmp_s r0, r3
-bhs.d _rirq_no_reschedule
 /*
 * Both (a)reschedule and (b)non-reschedule cases need to load the
 * current thread's stack, but don't have to use it until the decision

arch/arm/core/exc_exit.S (8 changed lines)

@@ -75,14 +75,6 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
 ldr r1, [r0, #_kernel_offset_to_current]
-/*
- * Non-preemptible thread ? Do not schedule (see explanation of
- * preempt field in kernel_struct.h).
- */
-ldrh r2, [r1, #_thread_offset_to_preempt]
-cmp r2, #_PREEMPT_THRESHOLD
-bhi _EXIT_EXC
 ldr r0, [r0, _kernel_offset_to_ready_q_cache]
 cmp r0, r1
 beq _EXIT_EXC

arch/nios2/core/exception.S (8 changed lines)

@@ -123,14 +123,6 @@ on_irq_stack:
 * switch
 */
-/*
- * Non-preemptible thread ? Do not schedule (see explanation of
- * preempt field in kernel_struct.h).
- */
-ldhu r12, _thread_offset_to_preempt(r11)
-movui r3, _NON_PREEMPT_THRESHOLD
-bgeu r12, r3, no_reschedule
 /* Call into the kernel to see if a scheduling decision is necessary */
 ldw r2, _kernel_offset_to_ready_q_cache(r10)
 beq r2, r11, no_reschedule

arch/riscv32/core/isr.S (8 changed lines)

@@ -291,14 +291,6 @@ on_thread_stack:
 /* Get pointer to _kernel.current */
 lw t2, _kernel_offset_to_current(t1)
-/*
- * If non-preemptible thread, do not schedule
- * (see explanation of preempt field in kernel_structs.h
- */
-lhu t3, _thread_offset_to_preempt(t2)
-li t4, _NON_PREEMPT_THRESHOLD
-bgeu t3, t4, no_reschedule
 /*
 * Check if next thread to schedule is current thread.
 * If yes do not perform a reschedule

arch/x86/core/intstub.S (8 changed lines)

@@ -255,14 +255,6 @@ alreadyOnIntStack:
 #ifdef CONFIG_PREEMPT_ENABLED
 movl _kernel_offset_to_current(%ecx), %edx
-/*
- * Non-preemptible thread ? Do not schedule (see explanation of
- * preempt field in kernel_struct.h).
- */
-cmpw $_NON_PREEMPT_THRESHOLD, _thread_offset_to_preempt(%edx)
-jae noReschedule
 /* reschedule only if the scheduler says that we must do so */
 cmpl %edx, _kernel_offset_to_ready_q_cache(%ecx)
 je noReschedule

arch/x86/core/irq_manage.c (4 changed lines)

@@ -82,11 +82,9 @@ void _arch_isr_direct_footer(int swap)
 *
 * 1) swap argument was enabled to this function
 * 2) We are not in a nested interrupt
-* 3) Current thread is preemptible
-* 4) Next thread to run in the ready queue is not this thread
+* 3) Next thread to run in the ready queue is not this thread
 */
 if (swap && !_kernel.nested &&
-_current->base.preempt < _NON_PREEMPT_THRESHOLD &&
 _kernel.ready_q.cache != _current) {
 unsigned int flags;

arch/xtensa/core/xt_zephyr.S (7 changed lines)

@@ -191,13 +191,6 @@ _zxt_int_exit:
 wsr a3, CPENABLE /* disable all co-processors */
 #endif
 l32i a3, a2, KERNEL_OFFSET(current) /* _thread := _kernel.current */
-/*
- * Non-preemptible thread ? Do not schedule (see explanation of
- * preempt field in kernel_struct.h).
- */
-movi a4, _NON_PREEMPT_THRESHOLD
-l16ui a5, a3, THREAD_OFFSET(preempt)
-bgeu a5, a4, .noReschedule
 /* _thread := _kernel.ready_q.cache */
 l32i a3, a2, KERNEL_OFFSET(ready_q_cache)
 .noReschedule:

kernel/sched.c (13 changed lines)

@@ -176,7 +176,6 @@ static void update_cache(int preempt_ok)
 th = _current;
 }
 }
 _kernel.ready_q.cache = th;
 #endif
 }
@@ -339,7 +338,6 @@ void _thread_priority_set(struct k_thread *thread, int prio)
 int _reschedule(int key)
 {
 if (!_is_in_isr() &&
-_is_preempt(_current) &&
 _get_next_ready_thread() != _current) {
 return _Swap(key);
 }

@@ -389,19 +387,10 @@ struct k_thread *_get_next_ready_thread(void)
 #ifdef CONFIG_USE_SWITCH
 void *_get_next_switch_handle(void *interrupted)
 {
-if (!_is_preempt(_current) &&
-!(_current->base.thread_state & _THREAD_DEAD)) {
-return interrupted;
-}
 _current->switch_handle = interrupted;
 LOCKED(&sched_lock) {
-struct k_thread *next = next_up();
-if (next != _current) {
-_current = next;
-}
+_current = _get_next_ready_thread();
 }
 _check_stack_sentinel();
