
kernel/poll: Remove POLLING thread state bit

The _THREAD_POLLING bit in thread_state was never actually a
legitimate thread "state".  It was a clever synchronization trick
introduced to allow the thread to release the irq_lock while looping
over the input event array without dropping events.

Instead, make that flag a word in the "poller" struct that lives on
the stack of the thread calling k_poll (the resulting pattern is
sketched just after the commit message below).  The disadvantage is
the 4 bytes of thread stack space needed.  Advantages:

+ Cleaner API, it's now internal to poll instead of being globally
  visible.

+ The thread_state bit space is just one byte, and was almost full
  already.

+ Smaller code to write/test a full word and not a bitfield.

+ Words are atomic, so no need for one of the irq lock/unlock pairs.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
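
A condensed sketch of that pattern, assembled from the poll.c hunks
below (not verbatim kernel code; the pend/timeout path and error
handling are omitted):

/* Sketch: the is_polling word lives on the caller's stack and is
 * reachable by signalers through each registered event, so a plain
 * word write replaces the locked thread_state bit update.
 */
unsigned int key;
u32_t state;
struct _poller poller = { .thread = _current, .is_polling = 1, };

for (int ii = 0; ii < num_events; ii++) {
	key = irq_lock();
	if (is_condition_met(&events[ii], &state)) {
		set_event_ready(&events[ii], state);
		poller.is_polling = 0;	/* something fired; don't sleep */
	} else if (timeout != K_NO_WAIT && poller.is_polling) {
		register_event(&events[ii], &poller);
	}
	/* Dropping the lock between events is safe: a signaler that
	 * fires here writes poller.is_polling = 0, and the test below
	 * still sees it, so no wakeup is lost.
	 */
	irq_unlock(key);
}

key = irq_lock();
if (!poller.is_polling) {
	/* an event was signaled during the scan: unregister and return */
}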
commit 55a7e46b66 (pull/8324/head)
Andy Ross, committed by Anas Nashif
 include/kernel.h                |  1
 kernel/include/kernel_structs.h |  3
 kernel/include/ksched.h         | 15
 kernel/poll.c                   | 34
 4 files changed

include/kernel.h

@@ -4191,6 +4191,7 @@ extern void *k_calloc(size_t nmemb, size_t size);
 /* private - implementation data created as needed, per-type */
 struct _poller {
 	struct k_thread *thread;
+	volatile int is_polling;
 };
 
 /* private - types bit positions */

kernel/include/kernel_structs.h

@@ -45,9 +45,6 @@
 /* Thread is suspended */
 #define _THREAD_SUSPENDED (1 << 4)
 
-/* Thread is actively looking at events to see if they are ready */
-#define _THREAD_POLLING (1 << 5)
-
 /* Thread is present in the ready queue */
 #define _THREAD_QUEUED (1 << 6)
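
For scale on the "almost full" bullet in the commit message: before
this change the one-byte thread_state field already looked roughly
like the list below. Bits 4-6 appear in the hunk above; the bit 0-3
names are recalled from the same file and should be treated as
illustrative:

/* Approximate pre-commit thread_state layout (illustrative) */
#define _THREAD_DUMMY     (1 << 0)  /* not a real thread */
#define _THREAD_PENDING   (1 << 1)  /* waiting on an object */
#define _THREAD_PRESTART  (1 << 2)  /* not yet started */
#define _THREAD_DEAD      (1 << 3)  /* terminated */
#define _THREAD_SUSPENDED (1 << 4)  /* suspended */
#define _THREAD_POLLING   (1 << 5)  /* freed by this commit */
#define _THREAD_QUEUED    (1 << 6)  /* in the ready queue */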

kernel/include/ksched.h

@@ -107,11 +107,6 @@ static inline int _is_thread_state_set(struct k_thread *thread, u32_t state)
 	return !!(thread->base.thread_state & state);
 }
 
-static inline int _is_thread_polling(struct k_thread *thread)
-{
-	return _is_thread_state_set(thread, _THREAD_POLLING);
-}
-
 static inline int _is_thread_queued(struct k_thread *thread)
 {
 	return _is_thread_state_set(thread, _THREAD_QUEUED);
@@ -153,16 +148,6 @@ static inline void _reset_thread_states(struct k_thread *thread,
 	thread->base.thread_state &= ~states;
 }
 
-static inline void _mark_thread_as_polling(struct k_thread *thread)
-{
-	_set_thread_states(thread, _THREAD_POLLING);
-}
-
-static inline void _mark_thread_as_not_polling(struct k_thread *thread)
-{
-	_reset_thread_states(thread, _THREAD_POLLING);
-}
-
 static inline void _mark_thread_as_queued(struct k_thread *thread)
 {
 	_set_thread_states(thread, _THREAD_QUEUED);
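
The helpers removed above wrapped a read-modify-write of the shared
thread_state byte, which is why every call site needed the irq lock;
the replacement flag is written with a single word store. An
illustrative side-by-side (context variables assumed from the poll.c
hunks below):

/* Before: a bitfield clear is a read-modify-write on shared state,
 * so it had to sit inside an irq lock/unlock pair.
 */
key = irq_lock();
thread->base.thread_state &= ~_THREAD_POLLING;
irq_unlock(key);

/* After: a store to an aligned int is atomic by itself, so the flag
 * can be cleared without taking the lock at all.
 */
event->poller->is_polling = 0;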

kernel/poll.c

@@ -41,24 +41,6 @@ void k_poll_event_init(struct k_poll_event *event, u32_t type,
 	event->obj = obj;
 }
 
-/* must be called with interrupts locked */
-static inline void set_polling_state(struct k_thread *thread)
-{
-	_mark_thread_as_polling(thread);
-}
-
-/* must be called with interrupts locked */
-static inline void clear_polling_state(struct k_thread *thread)
-{
-	_mark_thread_as_not_polling(thread);
-}
-
-/* must be called with interrupts locked */
-static inline int is_polling(void)
-{
-	return _is_thread_polling(_current);
-}
-
 /* must be called with interrupts locked */
 static inline int is_condition_met(struct k_poll_event *event, u32_t *state)
 {
@@ -199,11 +181,7 @@ int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 	int last_registered = -1, rc;
 	unsigned int key;
 
-	key = irq_lock();
-	set_polling_state(_current);
-	irq_unlock(key);
-
-	struct _poller poller = { .thread = _current };
+	struct _poller poller = { .thread = _current, .is_polling = 1, };
 
 	/* find events whose condition is already fulfilled */
 	for (int ii = 0; ii < num_events; ii++) {
@@ -212,8 +190,8 @@ int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 		key = irq_lock();
 		if (is_condition_met(&events[ii], &state)) {
 			set_event_ready(&events[ii], state);
-			clear_polling_state(_current);
-		} else if (timeout != K_NO_WAIT && is_polling()) {
+			poller.is_polling = 0;
+		} else if (timeout != K_NO_WAIT && poller.is_polling) {
 			rc = register_event(&events[ii], &poller);
 			if (rc == 0) {
 				++last_registered;
@@ -231,13 +209,13 @@ int _impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 	 * condition is met, either when looping through the events here or
 	 * because one of the events registered has had its state changed.
 	 */
-	if (!is_polling()) {
+	if (!poller.is_polling) {
 		clear_event_registrations(events, last_registered, key);
 		irq_unlock(key);
 		return 0;
 	}
 
-	clear_polling_state(_current);
+	poller.is_polling = 0;
 
 	if (timeout == K_NO_WAIT) {
 		irq_unlock(key);
@@ -349,7 +327,7 @@ static int signal_poll_event(struct k_poll_event *event, u32_t state)
 
 	__ASSERT(event->poller->thread, "poller should have a thread\n");
 
-	clear_polling_state(thread);
+	event->poller->is_polling = 0;
 
 	if (!_is_thread_pending(thread)) {
 		goto ready_event;
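
Nothing changes for callers: k_poll still blocks until an event is
ready. A minimal usage sketch against the API of this era (the
semaphore and function names are illustrative, not from the commit):

#include <kernel.h>

K_SEM_DEFINE(my_sem, 0, 1);	/* illustrative semaphore */

void wait_for_sem(void)
{
	struct k_poll_event event;

	k_poll_event_init(&event, K_POLL_TYPE_SEM_AVAILABLE,
			  K_POLL_MODE_NOTIFY_ONLY, &my_sem);

	/* Blocks until the semaphore is signaled; internally k_poll now
	 * tracks readiness through the stack-resident poller.is_polling
	 * word instead of the removed _THREAD_POLLING state bit.
	 */
	if (k_poll(&event, 1, K_FOREVER) == 0) {
		k_sem_take(&my_sem, K_NO_WAIT);
	}
}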
