1 | // SPDX-License-Identifier: GPL-3.0-or-later |
2 | |
3 | #include "mos/tasks/schedule.h" |
4 | |
5 | #include "mos/assert.h" |
6 | #include "mos/lib/sync/spinlock.h" |
7 | #include "mos/misc/setup.h" |
8 | #include "mos/platform/platform.h" |
9 | #include "mos/tasks/scheduler.h" |
10 | #include "mos/tasks/thread.h" |
11 | |
12 | #include <mos_string.h> |
13 | |
14 | char thread_state_str(thread_state_t state) |
15 | { |
16 | switch (state) |
17 | { |
18 | case THREAD_STATE_CREATED: return 'C'; |
19 | case THREAD_STATE_READY: return 'R'; |
20 | case THREAD_STATE_RUNNING: return 'r'; |
21 | case THREAD_STATE_BLOCKED: return 'B'; |
22 | case THREAD_STATE_NONINTERRUPTIBLE: return 'N'; |
23 | case THREAD_STATE_DEAD: return 'D'; |
24 | } |
25 | |
26 | MOS_UNREACHABLE(); |
27 | } |
28 | |
29 | static bool scheduler_ready = false; |
30 | static scheduler_t *active_scheduler = NULL; |
31 | extern const scheduler_info_t __MOS_SCHEDULERS_START[], __MOS_SCHEDULERS_END[]; |
32 | |
33 | MOS_SETUP("scheduler" , scheduler_cmdline_selector) |
34 | { |
35 | for (const scheduler_info_t *info = __MOS_SCHEDULERS_START; info < __MOS_SCHEDULERS_END; info++) |
36 | { |
37 | if (strcmp(str1: info->name, str2: arg) == 0) |
38 | { |
39 | active_scheduler = info->scheduler; |
40 | active_scheduler->ops->init(active_scheduler); |
41 | pr_dinfo2(scheduler, "active scheduler: %s" , info->name); |
42 | return true; |
43 | } |
44 | } |
45 | |
46 | pr_dwarn(scheduler, "scheduler '%s' not found" , arg); |
47 | return false; |
48 | } |
49 | |
50 | void scheduler_init() |
51 | { |
52 | if (!active_scheduler) |
53 | { |
54 | pr_dwarn(scheduler, "no scheduler is selected, using the first scheduler" ); |
55 | active_scheduler = __MOS_SCHEDULERS_START[0].scheduler; |
56 | active_scheduler->ops->init(active_scheduler); |
57 | } |
58 | } |
59 | |
/**
 * Allow CPUs spinning in enter_scheduler() to proceed.
 *
 * Must be called exactly once (asserted), after boot-time initialization
 * is far enough along for scheduling to begin.
 */
void unblock_scheduler(void)
{
    pr_dinfo2(scheduler, "unblocking scheduler" );
    MOS_ASSERT_X(!scheduler_ready, "scheduler is already unblocked" );
    scheduler_ready = true;
}
66 | |
/**
 * Park the calling CPU until the scheduler is unblocked, then start
 * scheduling on it. Never returns.
 *
 * Precondition (asserted): no thread is running on this CPU yet.
 *
 * NOTE(review): scheduler_ready is written by another CPU in
 * unblock_scheduler(); verify its declaration is volatile/atomic so this
 * spin loop actually re-reads it — a plain bool load may be hoisted by
 * the compiler.
 */
[[noreturn]] void enter_scheduler(void)
{
    while (likely(!scheduler_ready))
        ; // wait for the scheduler to be unblocked

    pr_dinfo2(scheduler, "cpu %d: scheduler is ready" , platform_current_cpu_id());
    MOS_ASSERT(current_thread == NULL);
    reschedule();
    MOS_UNREACHABLE(); // reschedule() never returns here: there was no current thread to come back to
}
77 | |
/**
 * Hand a thread to the active scheduler's run queue.
 *
 * The thread must be valid and in CREATED or READY state (both asserted);
 * threads in any other state must not be queued for execution.
 */
void scheduler_add_thread(thread_t *thread)
{
    MOS_ASSERT(thread_is_valid(thread));
    MOS_ASSERT_X(thread->state == THREAD_STATE_CREATED || thread->state == THREAD_STATE_READY, "thread %pt is not in a valid state" , (void *) thread);
    active_scheduler->ops->add_thread(active_scheduler, thread);
}
84 | |
/**
 * Remove a thread from the active scheduler's run queue.
 *
 * The thread must be valid (asserted); no state requirement is imposed here —
 * the scheduler implementation handles threads that are not queued.
 */
void scheduler_remove_thread(thread_t *thread)
{
    MOS_ASSERT(thread_is_valid(thread));
    active_scheduler->ops->remove_thread(active_scheduler, thread);
}
90 | |
91 | void scheduler_wake_thread(thread_t *thread) |
92 | { |
93 | spinlock_acquire(&thread->state_lock); |
94 | if (thread->state == THREAD_STATE_READY || thread->state == THREAD_STATE_RUNNING || thread->state == THREAD_STATE_CREATED || thread->state == THREAD_STATE_DEAD) |
95 | { |
96 | spinlock_release(&thread->state_lock); |
97 | return; // thread is already running or ready |
98 | } |
99 | |
100 | MOS_ASSERT_X(thread->state == THREAD_STATE_BLOCKED || thread->state == THREAD_STATE_NONINTERRUPTIBLE, "thread %pt is not blocked" , (void *) thread); |
101 | thread->state = THREAD_STATE_READY; |
102 | spinlock_release(&thread->state_lock); |
103 | pr_dinfo2(scheduler, "waking up %pt" , (void *) thread); |
104 | active_scheduler->ops->add_thread(active_scheduler, thread); |
105 | } |
106 | |
107 | void reschedule(void) |
108 | { |
109 | // A thread can jump to the scheduler if it is: |
110 | // - in RUNNING state normal condition (context switch caused by timer interrupt or yield()) |
111 | // - in CREATED state the thread is not yet started |
112 | // - in DEAD state the thread is exiting, and the scheduler will clean it up |
113 | // - in BLOCKED state the thread is waiting for a condition, and we'll schedule to other threads |
114 | // But it can't be: |
115 | // - in READY state |
116 | cpu_t *cpu = current_cpu; |
117 | thread_t *const current = cpu->thread; |
118 | |
119 | thread_t *next = active_scheduler->ops->select_next(active_scheduler); |
120 | |
121 | if (!next) |
122 | { |
123 | if (current && current->state == THREAD_STATE_RUNNING) |
124 | { |
125 | // give the current thread another chance to run, if it's the only one and it's able to run |
126 | MOS_ASSERT_X(spinlock_is_locked(¤t->state_lock), "thread state lock must be held" ); |
127 | pr_dinfo2(scheduler, "no thread to run, staying with %pt, state = %c" , (void *) current, thread_state_str(current->state)); |
128 | spinlock_release(¤t->state_lock); |
129 | return; |
130 | } |
131 | |
132 | next = cpu->idle_thread; |
133 | } |
134 | |
135 | const bool should_switch_mm = cpu->mm_context != next->owner->mm; |
136 | if (should_switch_mm) |
137 | { |
138 | mm_context_t *old = mm_switch_context(new_ctx: next->owner->mm); |
139 | MOS_UNUSED(old); |
140 | } |
141 | |
142 | const switch_flags_t switch_flags = statement_expr(switch_flags_t, { |
143 | retval = SWITCH_REGULAR; |
144 | if (next->state == THREAD_STATE_CREATED) |
145 | retval |= next->mode == THREAD_MODE_KERNEL ? SWITCH_TO_NEW_KERNEL_THREAD : SWITCH_TO_NEW_USER_THREAD; |
146 | }); |
147 | |
148 | if (likely(current)) |
149 | { |
150 | if (current->state == THREAD_STATE_RUNNING) |
151 | { |
152 | current->state = THREAD_STATE_READY; |
153 | if (current != cpu->idle_thread) |
154 | scheduler_add_thread(thread: current); |
155 | } |
156 | pr_dinfo2(scheduler, "leaving %pt, state: '%c'" , (void *) current, thread_state_str(current->state)); |
157 | } |
158 | pr_dinfo2(scheduler, "switching to %pt, state: '%c'" , (void *) next, thread_state_str(next->state)); |
159 | |
160 | next->state = THREAD_STATE_RUNNING; |
161 | spinlock_release(&next->state_lock); |
162 | platform_switch_to_thread(current, new_thread: next, switch_flags); |
163 | } |
164 | |
/**
 * Block the calling thread and schedule away from it.
 *
 * Acquires the current thread's state_lock (reschedule() expects it held on
 * entry), marks the thread BLOCKED, and enters the scheduler. Control
 * returns here only after something wakes the thread (scheduler_wake_thread).
 *
 * NOTE(review): the state_lock acquired here is not released in this
 * function — confirm the switch path in reschedule()/platform code is
 * responsible for dropping it.
 */
void blocked_reschedule(void)
{
    thread_t *t = current_cpu->thread;
    spinlock_acquire(&t->state_lock);
    t->state = THREAD_STATE_BLOCKED;
    pr_dinfo2(scheduler, "%pt is now blocked" , (void *) t);
    reschedule();
}
173 | |
174 | bool reschedule_for_waitlist(waitlist_t *waitlist) |
175 | { |
176 | thread_t *t = current_cpu->thread; |
177 | MOS_ASSERT_X(t->state != THREAD_STATE_BLOCKED, "thread %d is already blocked" , t->tid); |
178 | |
179 | if (!waitlist_append(list: waitlist)) |
180 | return false; // waitlist is closed, process is dead |
181 | |
182 | blocked_reschedule(); |
183 | return true; |
184 | } |
185 | |