1 | // SPDX-License-Identifier: GPL-3.0-or-later |
2 | |
3 | #include "mos/lib/sync/spinlock.h" |
4 | #include "mos/mm/mm.h" |
5 | |
6 | #include <errno.h> |
7 | #include <limits.h> |
8 | #include <mos/lib/structures/hashmap.h> |
9 | #include <mos/lib/structures/hashmap_common.h> |
10 | #include <mos/lib/structures/list.h> |
11 | #include <mos/mm/cow.h> |
12 | #include <mos/mm/paging/paging.h> |
13 | #include <mos/platform/platform.h> |
14 | #include <mos/syslog/printk.h> |
15 | #include <mos/tasks/process.h> |
16 | #include <mos/tasks/schedule.h> |
17 | #include <mos/tasks/task_types.h> |
18 | #include <mos/tasks/thread.h> |
19 | #include <mos/tasks/wait.h> |
20 | #include <mos_stdlib.h> |
21 | #include <mos_string.h> |
22 | |
hashmap_t thread_table = { 0 }; // tid_t -> thread_t: global registry of all live threads; entries added in thread_complete_init(), removed in thread_destroy()
24 | |
25 | static tid_t new_thread_id(void) |
26 | { |
27 | static tid_t next = 1; |
28 | return (tid_t){ next++ }; |
29 | } |
30 | |
/**
 * Allocate and minimally initialise a thread_t and link it onto the
 * owner's thread list.
 *
 * The thread starts in THREAD_STATE_CREATED with a fresh unique tid.
 * Stacks, name, and global-table registration are handled later by
 * thread_new() / thread_complete_init(), not here.
 */
thread_t *thread_allocate(process_t *owner, thread_mode tflags)
{
    thread_t *t = kmalloc(thread_cache); // NOTE(review): result is not NULL-checked before use — presumably allocation here cannot fail; confirm
    t->magic = THREAD_MAGIC_THRD;
    t->tid = new_thread_id();
    t->owner = owner;
    t->state = THREAD_STATE_CREATED;
    t->mode = tflags;
    waitlist_init(list: &t->waiters);                       // waiters: threads blocked in thread_wait_for_tid() on us
    linked_list_init(head_node: &t->signal_info.pending);  // no signals pending yet
    linked_list_init(list_node(t));                        // self-init our list node before appending
    list_node_append(head: &owner->threads, list_node(t)); // make the thread visible on the owner's thread list
    return t;
}
45 | |
/**
 * Tear down a dead thread and free all of its resources.
 *
 * Frees: the thread-table entry, the platform context, the name string,
 * the user-stack vmap (user threads only), the kernel stack pages, and
 * finally the thread_t itself.
 *
 * Preconditions: the caller holds thread->state_lock, the thread is in
 * THREAD_STATE_DEAD, and the caller is not the thread being destroyed.
 */
void thread_destroy(thread_t *thread)
{
    MOS_ASSERT_X(thread != current_thread, "you cannot just destroy yourself" );
    if (!thread_is_valid(thread))
        return;

    // NOTE(review): the table entry is removed before the lock/state
    // assertions below run — presumably harmless because tids are never
    // reused, but worth confirming the intended ordering.
    hashmap_remove(map: &thread_table, key: thread->tid);

    pr_dinfo2(thread, "destroying thread %pt" , (void *) thread);
    MOS_ASSERT_X(spinlock_is_locked(&thread->state_lock), "thread state lock must be held" );
    MOS_ASSERT_X(thread->state == THREAD_STATE_DEAD, "thread must be dead for destroy" );

    platform_context_cleanup(thread);

    if (thread->name)
        kfree(ptr: thread->name);

    if (thread->mode == THREAD_MODE_USER)
    {
        process_t *const owner = thread->owner;
        // The user stack vmap is looked up by its top address (top - 1 is
        // the last byte inside the mapping) and destroyed under mm_lock.
        spinlock_acquire(&owner->mm->mm_lock);
        vmap_t *const stack = vmap_obtain(mmctx: owner->mm, vaddr: (ptr_t) thread->u_stack.top - 1, NULL);
        vmap_destroy(vmap: stack);
        spinlock_release(&owner->mm->mm_lock);
    }

    // Kernel stack: top points one past the highest frame, so step back
    // MOS_STACK_PAGES_KERNEL frames to recover the base frame to free.
    mm_free_pages(va_phyframe((ptr_t) thread->k_stack.top) - MOS_STACK_PAGES_KERNEL, MOS_STACK_PAGES_KERNEL);

    kfree(ptr: thread);
}
76 | |
77 | thread_t *thread_new(process_t *owner, thread_mode tmode, const char *name, size_t stack_size, void *explicit_stack_top) |
78 | { |
79 | thread_t *t = thread_allocate(owner, tflags: tmode); |
80 | |
81 | t->name = strdup(src: name); |
82 | |
83 | pr_dinfo2(thread, "creating new thread %pt, owner=%pp" , (void *) t, (void *) owner); |
84 | |
85 | // Kernel stack |
86 | const ptr_t kstack_blk = phyframe_va(mm_get_free_pages(MOS_STACK_PAGES_KERNEL)); |
87 | stack_init(stack: &t->k_stack, mem_region_bottom: (void *) kstack_blk, MOS_STACK_PAGES_KERNEL * MOS_PAGE_SIZE); |
88 | |
89 | if (tmode != THREAD_MODE_USER) |
90 | { |
91 | stack_init(stack: &t->u_stack, NULL, size: 0); // kernel thread has no user stack |
92 | return t; |
93 | } |
94 | |
95 | // User stack |
96 | const size_t user_stack_size = stack_size ? stack_size : MOS_STACK_PAGES_USER * MOS_PAGE_SIZE; |
97 | if (!explicit_stack_top) |
98 | { |
99 | vmap_t *stack_vmap = cow_allocate_zeroed_pages(handle: owner->mm, npages: user_stack_size / MOS_PAGE_SIZE, MOS_ADDR_USER_STACK, hints: VALLOC_DEFAULT, flags: VM_USER_RW); |
100 | stack_init(stack: &t->u_stack, mem_region_bottom: (void *) stack_vmap->vaddr, size: user_stack_size); |
101 | vmap_finalise_init(vmap: stack_vmap, content: VMAP_STACK, type: VMAP_TYPE_PRIVATE); |
102 | return t; |
103 | } |
104 | |
105 | // check if the stack is valid |
106 | mm_lock_ctx_pair(ctx1: owner->mm, NULL); |
107 | vmap_t *stack_vmap = vmap_obtain(mmctx: owner->mm, vaddr: (ptr_t) explicit_stack_top, NULL); |
108 | if (!stack_vmap) |
109 | { |
110 | pr_warn("invalid stack pointer %pt" , explicit_stack_top); |
111 | goto done_efault; |
112 | } |
113 | |
114 | // check if the stack vmap is valid |
115 | if (stack_vmap->content == VMAP_STACK) // has been claimed by another thread? |
116 | { |
117 | pr_warn("stack %pt has been claimed by another thread" , explicit_stack_top); |
118 | goto done_efault; |
119 | } |
120 | |
121 | // check if the stack is large enough |
122 | if (stack_vmap->npages < user_stack_size / MOS_PAGE_SIZE) |
123 | { |
124 | pr_warn("stack %pt is too small (size=%zu, required=%zu)" , explicit_stack_top, stack_vmap->npages * MOS_PAGE_SIZE, user_stack_size); |
125 | goto done_efault; |
126 | } |
127 | |
128 | // check if the stack is writable |
129 | if (!(stack_vmap->vmflags & VM_USER_RW)) |
130 | { |
131 | pr_warn("stack %pt is not writable" , explicit_stack_top); |
132 | goto done_efault; |
133 | } |
134 | |
135 | const ptr_t stack_bottom = ALIGN_UP_TO_PAGE((ptr_t) explicit_stack_top) - user_stack_size; |
136 | vmap_t *second = vmap_split(vmap: stack_vmap, split: (stack_bottom - stack_vmap->vaddr) / MOS_PAGE_SIZE); |
137 | spinlock_release(&stack_vmap->lock); |
138 | stack_vmap = second; |
139 | |
140 | stack_vmap->content = VMAP_STACK; |
141 | stack_vmap->type = VMAP_TYPE_PRIVATE; |
142 | spinlock_release(&stack_vmap->lock); |
143 | mm_unlock_ctx_pair(ctx1: owner->mm, NULL); |
144 | stack_init(stack: &t->u_stack, mem_region_bottom: (void *) stack_bottom, size: user_stack_size); |
145 | t->u_stack.head = (ptr_t) explicit_stack_top; |
146 | return t; |
147 | |
148 | done_efault: |
149 | spinlock_release(&stack_vmap->lock); |
150 | mm_unlock_ctx_pair(ctx1: owner->mm, NULL); |
151 | spinlock_acquire(&t->state_lock); |
152 | thread_destroy(thread: t); |
153 | return ERR_PTR(error: -EFAULT); // invalid stack pointer |
154 | } |
155 | |
156 | thread_t *thread_complete_init(thread_t *thread) |
157 | { |
158 | if (!thread_is_valid(thread)) |
159 | return NULL; |
160 | |
161 | thread_t *old = hashmap_put(map: &thread_table, key: thread->tid, value: thread); |
162 | MOS_ASSERT(old == NULL); |
163 | return thread; |
164 | } |
165 | |
166 | thread_t *thread_get(tid_t tid) |
167 | { |
168 | thread_t *t = hashmap_get(map: &thread_table, key: tid); |
169 | if (thread_is_valid(thread: t)) |
170 | return t; |
171 | |
172 | return NULL; |
173 | } |
174 | |
175 | bool thread_wait_for_tid(tid_t tid) |
176 | { |
177 | thread_t *target = thread_get(tid); |
178 | if (target == NULL) |
179 | { |
180 | pr_warn("wait_for_tid(%d) from pid %d (%s) but thread does not exist" , tid, current_process->pid, current_process->name); |
181 | return false; |
182 | } |
183 | |
184 | if (target->owner != current_process) |
185 | { |
186 | pr_warn("wait_for_tid(%d) from process %pp but thread belongs to %pp" , tid, (void *) current_process, (void *) target->owner); |
187 | return false; |
188 | } |
189 | |
190 | bool ok = reschedule_for_waitlist(waitlist: &target->waiters); |
191 | MOS_UNUSED(ok); // true: thread is dead, false: thread is already dead at the time of calling |
192 | |
193 | return true; |
194 | } |
195 | |
/**
 * Terminate thread @t: acquire its state lock, then hand off to
 * thread_exit_locked(), which marks it dead and never returns.
 * The lock is intentionally not released here — destruction happens
 * under it (see thread_destroy()).
 */
void thread_exit(thread_t *t)
{
    MOS_ASSERT_X(thread_is_valid(t), "thread_handle_exit() called on invalid thread" );
    spinlock_acquire(&t->state_lock);
    thread_exit_locked(t); // [[noreturn]]
}
202 | |
/**
 * Terminate thread @t with its state lock already held. Never returns.
 *
 * Marks the thread dead, closes its waitlist (so no new waiters can
 * join), wakes everyone already waiting in thread_wait_for_tid(), and
 * then yields forever — the scheduler is expected to reap and destroy
 * the dead thread; control never comes back here.
 */
[[noreturn]] void thread_exit_locked(thread_t *t)
{
    MOS_ASSERT_X(thread_is_valid(t), "thread_exit_locked() called on invalid thread" );

    pr_dinfo(thread, "thread %pt is exiting" , (void *) t);

    MOS_ASSERT_X(spinlock_is_locked(&t->state_lock), "thread state lock must be held" );

    t->state = THREAD_STATE_DEAD;

    waitlist_close(list: &t->waiters);          // no new waiters may enqueue after this point
    waitlist_wake(list: &t->waiters, INT_MAX);  // wake all existing waiters, not just one

    // Loop in case a spurious wakeup ever returns from reschedule().
    while (true)
        reschedule();
    MOS_UNREACHABLE();
}
220 | |