1// SPDX-License-Identifier: GPL-3.0-or-later
2
3#include "mos/assert.hpp"
4#include "mos/lib/sync/spinlock.hpp"
5#include "mos/mm/mm.hpp"
6
7#include <errno.h>
8#include <limits.h>
9#include <mos/lib/structures/hashmap.hpp>
10#include <mos/lib/structures/hashmap_common.hpp>
11#include <mos/lib/structures/list.hpp>
12#include <mos/mm/cow.hpp>
13#include <mos/mm/paging/paging.hpp>
14#include <mos/platform/platform.hpp>
15#include <mos/syslog/printk.hpp>
16#include <mos/tasks/process.hpp>
17#include <mos/tasks/schedule.hpp>
18#include <mos/tasks/task_types.hpp>
19#include <mos/tasks/thread.hpp>
20#include <mos/tasks/wait.hpp>
21#include <mos_stdlib.hpp>
22#include <mos_string.hpp>
23
// Global registry of all initialised threads, keyed by thread id.
// Entries are inserted by thread_complete_init() and removed by
// thread_destroy(); lookups go through thread_get().
mos::HashMap<tid_t, Thread *> thread_table; // tid_t -> Thread
25
26static tid_t new_thread_id(void)
27{
28 static tid_t next = 1;
29 return (tid_t) { next++ };
30}
31
// Destructor: logs at emergency level whenever a Thread object is actually
// deleted. NOTE(review): thread_destroy() below frees the stacks but never
// deletes the Thread object itself — presumably destruction through this
// path is unexpected today, hence the pr_emerg; confirm intended lifetime.
Thread::~Thread()
{
    pr_emerg("thread %p destroyed", this);
}
36
37Thread *thread_allocate(Process *owner, thread_mode tflags)
38{
39 const auto t = mos::create<Thread>();
40 t->magic = THREAD_MAGIC_THRD;
41 t->tid = new_thread_id();
42 t->owner = owner;
43 t->state = THREAD_STATE_CREATED;
44 t->mode = tflags;
45 waitlist_init(list: &t->waiters);
46 linked_list_init(head_node: &t->signal_info.pending);
47 linked_list_init(list_node(t));
48 owner->thread_list.push_back(value: t);
49 return t;
50}
51
52void thread_destroy(Thread *thread)
53{
54 MOS_ASSERT_X(thread != current_thread, "you cannot just destroy yourself");
55 if (!thread_is_valid(thread))
56 return;
57
58 thread_table.remove(key: thread->tid);
59
60 pr_dinfo2(thread, "destroying thread %pt", thread);
61 MOS_ASSERT_X(spinlock_is_locked(&thread->state_lock), "thread state lock must be held");
62 MOS_ASSERT_X(thread->state == THREAD_STATE_DEAD, "thread must be dead for destroy");
63
64 platform_context_cleanup(thread);
65
66 if (thread->mode == THREAD_MODE_USER)
67 {
68 const auto owner = thread->owner;
69 SpinLocker lock(&owner->mm->mm_lock);
70 vmap_t *const stack = vmap_obtain(mmctx: owner->mm, vaddr: (ptr_t) thread->u_stack.top - 1, NULL);
71 vmap_destroy(vmap: stack);
72 }
73
74 mm_free_pages(va_phyframe((ptr_t) thread->k_stack.top) - MOS_STACK_PAGES_KERNEL, MOS_STACK_PAGES_KERNEL);
75}
76
77PtrResult<Thread> thread_new(Process *owner, thread_mode tmode, mos::string_view name, size_t stack_size, void *explicit_stack_top)
78{
79 const auto t = thread_allocate(owner, tflags: tmode);
80
81 t->name = name;
82
83 pr_dinfo2(thread, "creating new thread %pt, owner=%pp", t, owner);
84
85 // Kernel stack
86 const ptr_t kstack_blk = phyframe_va(mm_get_free_pages(MOS_STACK_PAGES_KERNEL));
87 stack_init(stack: &t->k_stack, mem_region_bottom: (void *) kstack_blk, MOS_STACK_PAGES_KERNEL * MOS_PAGE_SIZE);
88
89 if (tmode != THREAD_MODE_USER)
90 {
91 stack_init(stack: &t->u_stack, NULL, size: 0); // kernel thread has no user stack
92 return t;
93 }
94
95 // User stack
96 const size_t user_stack_size = stack_size ? stack_size : MOS_STACK_PAGES_USER * MOS_PAGE_SIZE;
97 if (!explicit_stack_top)
98 {
99 auto stack_vmap = cow_allocate_zeroed_pages(handle: owner->mm, npages: user_stack_size / MOS_PAGE_SIZE, MOS_ADDR_USER_STACK, hints: VALLOC_DEFAULT, flags: VM_USER_RW);
100 if (stack_vmap.isErr())
101 {
102 pr_emerg("failed to allocate stack for new thread");
103 thread_destroy(thread: std::move(t: t));
104 return stack_vmap.getErr();
105 }
106
107 stack_init(stack: &t->u_stack, mem_region_bottom: (void *) stack_vmap->vaddr, size: user_stack_size);
108 vmap_finalise_init(vmap: stack_vmap.get(), content: VMAP_STACK, type: VMAP_TYPE_PRIVATE);
109 return t;
110 }
111
112 // check if the stack is valid
113 mm_lock_ctx_pair(ctx1: owner->mm, NULL);
114 vmap_t *stack_vmap = vmap_obtain(mmctx: owner->mm, vaddr: (ptr_t) explicit_stack_top, NULL);
115 if (!stack_vmap)
116 {
117 pr_warn("invalid stack pointer %pt", explicit_stack_top);
118 goto done_efault;
119 }
120
121 // check if the stack vmap is valid
122 if (stack_vmap->content == VMAP_STACK) // has been claimed by another thread?
123 {
124 pr_warn("stack %pt has been claimed by another thread", explicit_stack_top);
125 goto done_efault;
126 }
127
128 // check if the stack is large enough
129 if (stack_vmap->npages < user_stack_size / MOS_PAGE_SIZE)
130 {
131 pr_warn("stack %pt is too small (size=%zu, required=%zu)", explicit_stack_top, stack_vmap->npages * MOS_PAGE_SIZE, user_stack_size);
132 goto done_efault;
133 }
134
135 // check if the stack is writable
136 if (!(stack_vmap->vmflags & VM_USER_RW))
137 {
138 pr_warn("stack %pt is not writable", explicit_stack_top);
139 goto done_efault;
140 }
141
142 {
143 const ptr_t stack_bottom = ALIGN_UP_TO_PAGE((ptr_t) explicit_stack_top) - user_stack_size;
144 vmap_t *second = vmap_split(vmap: stack_vmap, split: (stack_bottom - stack_vmap->vaddr) / MOS_PAGE_SIZE);
145 spinlock_release(&stack_vmap->lock);
146 stack_vmap = second;
147
148 stack_vmap->content = VMAP_STACK;
149 stack_vmap->type = VMAP_TYPE_PRIVATE;
150 spinlock_release(&stack_vmap->lock);
151 mm_unlock_ctx_pair(ctx1: owner->mm, NULL);
152 stack_init(stack: &t->u_stack, mem_region_bottom: (void *) stack_bottom, size: user_stack_size);
153 t->u_stack.head = (ptr_t) explicit_stack_top;
154 return t;
155 }
156
157done_efault:
158 spinlock_release(&stack_vmap->lock);
159 mm_unlock_ctx_pair(ctx1: owner->mm, NULL);
160 spinlock_acquire(&t->state_lock);
161 thread_destroy(thread: std::move(t: t));
162 return -EFAULT; // invalid stack pointer
163}
164
165Thread *thread_complete_init(Thread *thread)
166{
167 if (!thread_is_valid(thread))
168 return NULL;
169
170 thread_table.insert(key: thread->tid, value: thread);
171 return thread;
172}
173
174Thread *thread_get(tid_t tid)
175{
176 const auto ppthread = thread_table.get(key: tid);
177 if (ppthread == nullptr)
178 {
179 pr_warn("thread_get(%d) from pid %d (%s) but thread does not exist", tid, current_process->pid, current_process->name.c_str());
180 return NULL;
181 }
182
183 if (thread_is_valid(thread: *ppthread))
184 return *ppthread;
185
186 return NULL;
187}
188
189bool thread_wait_for_tid(tid_t tid)
190{
191 auto target = thread_get(tid);
192 if (!target)
193 {
194 pr_warn("wait_for_tid(%d) from pid %d (%s) but thread does not exist", tid, current_process->pid, current_process->name.c_str());
195 return false;
196 }
197
198 if (target->owner != current_process)
199 {
200 pr_warn("wait_for_tid(%d) from process %pp but thread belongs to %pp", tid, current_process, target->owner);
201 return false;
202 }
203
204 bool ok = reschedule_for_waitlist(waitlist: &target->waiters);
205 MOS_UNUSED(ok); // true: thread is dead, false: thread is already dead at the time of calling
206
207 return true;
208}
209
// Terminate the given thread: takes the state lock, then hands off to
// thread_exit_locked(). Never returns (thread_exit_locked is [[noreturn]]),
// though this wrapper is not annotated as such — presumably to match the
// out-of-view declaration; confirm before adding the attribute.
void thread_exit(Thread *&&t)
{
    MOS_ASSERT_X(thread_is_valid(t), "thread_handle_exit() called on invalid thread");
    spinlock_acquire(&t->state_lock);
    thread_exit_locked(std::move(t));
}
216
// Terminate a thread whose state lock is already held (asserted).
// Marks the thread dead, closes its waitlist so no new waiters can join,
// wakes everyone already waiting, then yields forever — the scheduler is
// expected to never pick a THREAD_STATE_DEAD thread again.
[[noreturn]] void thread_exit_locked(Thread *&&t)
{
    MOS_ASSERT_X(thread_is_valid(t), "thread_exit_locked() called on invalid thread");

    pr_dinfo(thread, "thread %pt is exiting", t);

    MOS_ASSERT_X(spinlock_is_locked(&t->state_lock), "thread state lock must be held");

    t->state = THREAD_STATE_DEAD;

    // close first so no waiter can enqueue after the wake-up below
    waitlist_close(&t->waiters);
    waitlist_wake(&t->waiters, INT_MAX);

    // loop defensively: reschedule() should never return to a dead thread
    while (true)
        reschedule();
    MOS_UNREACHABLE();
}
234