1 | // SPDX-License-Identifier: GPL-3.0-or-later |
2 | |
3 | #include "mos/tasks/schedule.hpp" |
4 | |
5 | #include "mos/assert.hpp" |
6 | #include "mos/lib/sync/spinlock.hpp" |
7 | #include "mos/misc/setup.hpp" |
8 | #include "mos/platform/platform.hpp" |
9 | #include "mos/tasks/scheduler.hpp" |
10 | #include "mos/tasks/thread.hpp" |
11 | |
12 | #include <mos_string.hpp> |
13 | |
14 | char thread_state_str(thread_state_t state) |
15 | { |
16 | switch (state) |
17 | { |
18 | case THREAD_STATE_CREATED: return 'C'; |
19 | case THREAD_STATE_READY: return 'R'; |
20 | case THREAD_STATE_RUNNING: return 'r'; |
21 | case THREAD_STATE_BLOCKED: return 'B'; |
22 | case THREAD_STATE_NONINTERRUPTIBLE: return 'N'; |
23 | case THREAD_STATE_DEAD: return 'D'; |
24 | } |
25 | |
26 | MOS_UNREACHABLE(); |
27 | } |
28 | |
// Set once by unblock_scheduler(); enter_scheduler() busy-waits on it.
// NOTE(review): read across CPUs as a plain bool (no volatile/atomic) — confirm the
// build guarantees the spin loop in enter_scheduler() actually re-reads it.
static bool scheduler_ready = false;
// The scheduler implementation in use; selected by the "scheduler" cmdline option
// or defaulted in scheduler_init().
static scheduler_t *active_scheduler = NULL;
// Linker-provided bounds of the table of registered schedulers.
extern const scheduler_info_t __MOS_SCHEDULERS_START[], __MOS_SCHEDULERS_END[];
32 | |
33 | MOS_SETUP("scheduler" , scheduler_cmdline_selector) |
34 | { |
35 | for (const scheduler_info_t *info = __MOS_SCHEDULERS_START; info < __MOS_SCHEDULERS_END; info++) |
36 | { |
37 | if (strcmp(str1: info->name, str2: arg) == 0) |
38 | { |
39 | active_scheduler = info->scheduler; |
40 | active_scheduler->ops->init(active_scheduler); |
41 | pr_dinfo2(scheduler, "active scheduler: %s" , info->name); |
42 | return true; |
43 | } |
44 | } |
45 | |
46 | pr_dwarn(scheduler, "scheduler '%s' not found" , arg); |
47 | return false; |
48 | } |
49 | |
50 | void scheduler_init() |
51 | { |
52 | if (!active_scheduler) |
53 | { |
54 | pr_dwarn(scheduler, "no scheduler is selected, using the first scheduler" ); |
55 | active_scheduler = __MOS_SCHEDULERS_START[0].scheduler; |
56 | active_scheduler->ops->init(active_scheduler); |
57 | } |
58 | } |
59 | |
/**
 * @brief Release the CPUs spinning in enter_scheduler() so scheduling can begin.
 *
 * Must be called exactly once; a second call trips the assertion.
 */
void unblock_scheduler(void)
{
    pr_dinfo2(scheduler, "unblocking scheduler");
    MOS_ASSERT_X(!scheduler_ready, "scheduler is already unblocked");
    scheduler_ready = true;
}
66 | |
/**
 * @brief Park the calling CPU until the scheduler is unblocked, then start scheduling.
 *
 * Busy-waits on scheduler_ready, then enters the scheduler via reschedule().
 * Never returns: the CPU has no current thread here, so reschedule() switches
 * away permanently.
 *
 * NOTE(review): scheduler_ready is a plain non-volatile, non-atomic bool written
 * by another CPU — an optimising compiler may hoist the load out of this loop;
 * confirm the build provides a re-read guarantee (volatile/atomic/barrier).
 * NOTE(review): likely(!scheduler_ready) hints "still waiting" as the common
 * case — verify this is intentional and not an inverted hint.
 */
[[noreturn]] void enter_scheduler(void)
{
    while (likely(!scheduler_ready))
        ; // wait for the scheduler to be unblocked

    pr_dinfo2(scheduler, "cpu %d: scheduler is ready", platform_current_cpu_id());
    // a CPU enters the scheduler before it has ever run a thread
    MOS_ASSERT(current_thread == nullptr);
    reschedule();
    MOS_UNREACHABLE();
}
77 | |
/**
 * @brief Hand a thread to the active scheduler's run queue.
 *
 * @param thread must be valid and in CREATED or READY state; any other state is
 *               a caller bug and trips the assertion.
 */
void scheduler_add_thread(Thread *thread)
{
    MOS_ASSERT(thread_is_valid(thread));
    MOS_ASSERT_X(thread->state == THREAD_STATE_CREATED || thread->state == THREAD_STATE_READY, "thread %pt is not in a valid state", thread);
    active_scheduler->ops->add_thread(active_scheduler, thread);
}
84 | |
/**
 * @brief Remove a thread from the active scheduler's run queue.
 *
 * @param thread must be a valid thread (asserted); no state requirement is checked here.
 */
void scheduler_remove_thread(Thread *thread)
{
    MOS_ASSERT(thread_is_valid(thread));
    active_scheduler->ops->remove_thread(active_scheduler, thread);
}
90 | |
91 | void scheduler_wake_thread(Thread *thread) |
92 | { |
93 | spinlock_acquire(&thread->state_lock); |
94 | if (thread->state == THREAD_STATE_READY || thread->state == THREAD_STATE_RUNNING || thread->state == THREAD_STATE_CREATED || thread->state == THREAD_STATE_DEAD) |
95 | { |
96 | spinlock_release(&thread->state_lock); |
97 | return; // thread is already running or ready |
98 | } |
99 | |
100 | MOS_ASSERT_X(thread->state == THREAD_STATE_BLOCKED || thread->state == THREAD_STATE_NONINTERRUPTIBLE, "thread %pt is not blocked" , thread); |
101 | thread->state = THREAD_STATE_READY; |
102 | spinlock_release(&thread->state_lock); |
103 | pr_dinfo2(scheduler, "waking up %pt" , thread); |
104 | active_scheduler->ops->add_thread(active_scheduler, thread); |
105 | } |
106 | |
107 | void reschedule(void) |
108 | { |
109 | // A thread can jump to the scheduler if it is: |
110 | // - in RUNNING state normal condition (context switch caused by timer interrupt or yield()) |
111 | // - in CREATED state the thread is not yet started |
112 | // - in DEAD state the thread is exiting, and the scheduler will clean it up |
113 | // - in BLOCKED state the thread is waiting for a condition, and we'll schedule to other threads |
114 | // But it can't be: |
115 | // - in READY state |
116 | cpu_t *cpu = current_cpu; |
117 | |
118 | auto next = active_scheduler->ops->select_next(active_scheduler); |
119 | |
120 | if (!next) |
121 | { |
122 | if (current_thread && current_thread->state == THREAD_STATE_RUNNING) |
123 | { |
124 | // give the current thread another chance to run, if it's the only one and it's able to run |
125 | MOS_ASSERT_X(spinlock_is_locked(¤t_thread->state_lock), "thread state lock must be held" ); |
126 | pr_dinfo2(scheduler, "no thread to run, staying with %pt, state = %c" , current_thread, thread_state_str(current_thread->state)); |
127 | spinlock_release(¤t_thread->state_lock); |
128 | return; |
129 | } |
130 | |
131 | next = cpu->idle_thread; |
132 | } |
133 | |
134 | const bool should_switch_mm = cpu->mm_context != next->owner->mm; |
135 | if (should_switch_mm) |
136 | { |
137 | MMContext *old = mm_switch_context(new_ctx: next->owner->mm); |
138 | MOS_UNUSED(old); |
139 | } |
140 | |
141 | const switch_flags_t switch_flags = statement_expr(switch_flags_t, { |
142 | retval = SWITCH_REGULAR; |
143 | if (next->state == THREAD_STATE_CREATED) |
144 | retval |= next->mode == THREAD_MODE_KERNEL ? SWITCH_TO_NEW_KERNEL_THREAD : SWITCH_TO_NEW_USER_THREAD; |
145 | }); |
146 | |
147 | if (likely(current_thread)) |
148 | { |
149 | if (current_thread->state == THREAD_STATE_RUNNING) |
150 | { |
151 | current_thread->state = THREAD_STATE_READY; |
152 | if (current_thread != cpu->idle_thread) |
153 | scheduler_add_thread(current_thread); |
154 | } |
155 | pr_dinfo2(scheduler, "leaving %pt, state: '%c'" , current_thread, thread_state_str(current_thread->state)); |
156 | } |
157 | pr_dinfo2(scheduler, "switching to %pt, state: '%c'" , next, thread_state_str(next->state)); |
158 | |
159 | next->state = THREAD_STATE_RUNNING; |
160 | spinlock_release(&next->state_lock); |
161 | platform_switch_to_thread(current_thread, new_thread: next, switch_flags); |
162 | } |
163 | |
164 | void blocked_reschedule(void) |
165 | { |
166 | spinlock_acquire(¤t_thread->state_lock); |
167 | current_thread->state = THREAD_STATE_BLOCKED; |
168 | pr_dinfo2(scheduler, "%pt is now blocked" , current_thread); |
169 | reschedule(); |
170 | } |
171 | |
172 | bool reschedule_for_waitlist(waitlist_t *waitlist) |
173 | { |
174 | MOS_ASSERT_X(current_thread->state != THREAD_STATE_BLOCKED, "thread %d is already blocked" , current_thread->tid); |
175 | |
176 | if (!waitlist_append(list: waitlist)) |
177 | return false; // waitlist is closed, process is dead |
178 | |
179 | blocked_reschedule(); |
180 | return true; |
181 | } |
182 | |