1 | // SPDX-License-Identifier: GPL-3.0-or-later |
2 | |
3 | #include "mos/x86/tasks/context.h" |
4 | |
5 | #include "mos/platform/platform_defs.h" |
6 | #include "mos/tasks/signal.h" |
7 | #include "mos/x86/descriptors/descriptors.h" |
8 | #include "mos/x86/tasks/fpu_context.h" |
9 | |
10 | #include <mos/lib/structures/stack.h> |
11 | #include <mos/mos_global.h> |
12 | #include <mos/platform/platform.h> |
13 | #include <mos/syslog/printk.h> |
14 | #include <mos/tasks/schedule.h> |
15 | #include <mos/tasks/task_types.h> |
16 | #include <mos/types.h> |
17 | #include <mos/x86/cpu/cpu.h> |
18 | #include <mos/x86/mm/paging_impl.h> |
19 | #include <mos/x86/x86_interrupt.h> |
20 | #include <mos/x86/x86_platform.h> |
21 | #include <mos_stdlib.h> |
22 | #include <mos_string.h> |
23 | |
// Function run on the NEW thread's kernel stack right after the stack switch.
typedef void (*switch_func_t)();

// Assembly entry points (defined in the platform's context-switch .S file):
// x86_normal_switch_impl resumes a previously-switched-out thread;
// x86_context_switch_impl saves the old stack pointer through `old_stack`,
// pivots to `new_kstack`, releases `lock`, then tail-calls `switcher`.
extern void x86_normal_switch_impl();
extern void x86_context_switch_impl(ptr_t *old_stack, ptr_t new_kstack, switch_func_t switcher, bool *lock);
28 | |
29 | static void x86_start_kernel_thread() |
30 | { |
31 | platform_regs_t *regs = platform_thread_regs(current_thread); |
32 | const thread_entry_t entry = (thread_entry_t) regs->ip; |
33 | void *const arg = (void *) regs->di; |
34 | entry(arg); |
35 | MOS_UNREACHABLE(); |
36 | } |
37 | |
38 | static void x86_start_user_thread() |
39 | { |
40 | platform_regs_t *regs = platform_thread_regs(current_thread); |
41 | signal_exit_to_user_prepare(regs); |
42 | platform_return_to_userspace(regs); |
43 | } |
44 | |
45 | static platform_regs_t *x86_setup_thread_common(thread_t *thread) |
46 | { |
47 | MOS_ASSERT_X(thread->platform_options.xsaveptr == NULL, "xsaveptr should be NULL" ); |
48 | thread->platform_options.xsaveptr = kmalloc(xsave_area_slab); |
49 | thread->k_stack.head -= sizeof(platform_regs_t); |
50 | platform_regs_t *regs = platform_thread_regs(thread); |
51 | *regs = (platform_regs_t){ 0 }; |
52 | |
53 | regs->cs = thread->mode == THREAD_MODE_KERNEL ? GDT_SEGMENT_KCODE : GDT_SEGMENT_USERCODE | 3; |
54 | regs->ss = thread->mode == THREAD_MODE_KERNEL ? GDT_SEGMENT_KDATA : GDT_SEGMENT_USERDATA | 3; |
55 | regs->sp = thread->mode == THREAD_MODE_KERNEL ? thread->k_stack.top : thread->u_stack.top; |
56 | |
57 | if (thread->mode == THREAD_MODE_USER) |
58 | { |
59 | regs->eflags = 0x202; |
60 | if (thread->owner->platform_options.iopl) |
61 | regs->eflags |= 0x3000; |
62 | } |
63 | |
64 | return regs; |
65 | } |
66 | |
67 | void platform_context_setup_main_thread(thread_t *thread, ptr_t entry, ptr_t sp, int argc, ptr_t argv, ptr_t envp) |
68 | { |
69 | platform_regs_t *regs = x86_setup_thread_common(thread); |
70 | regs->ip = entry; |
71 | regs->di = argc; |
72 | regs->si = argv; |
73 | regs->dx = envp; |
74 | regs->sp = sp; |
75 | } |
76 | |
77 | void platform_context_cleanup(thread_t *thread) |
78 | { |
79 | if (thread->mode == THREAD_MODE_USER) |
80 | if (thread->platform_options.xsaveptr) |
81 | kfree(ptr: thread->platform_options.xsaveptr), thread->platform_options.xsaveptr = NULL; |
82 | } |
83 | |
84 | void platform_context_setup_child_thread(thread_t *thread, thread_entry_t entry, void *arg) |
85 | { |
86 | platform_regs_t *regs = x86_setup_thread_common(thread); |
87 | regs->di = (ptr_t) arg; |
88 | regs->ip = (ptr_t) entry; |
89 | |
90 | if (thread->mode == THREAD_MODE_KERNEL) |
91 | return; |
92 | |
93 | MOS_ASSERT(thread->owner->mm == current_mm); |
94 | MOS_ASSERT(thread != thread->owner->main_thread); |
95 | |
96 | regs->di = (ptr_t) arg; // argument |
97 | regs->sp = thread->u_stack.head; // update the stack pointer |
98 | } |
99 | |
100 | void platform_context_clone(const thread_t *from, thread_t *to) |
101 | { |
102 | platform_regs_t *to_regs = platform_thread_regs(thread: to); |
103 | *to_regs = *platform_thread_regs(thread: from); |
104 | to_regs->ax = 0; // return 0 for the child |
105 | |
106 | // synchronise the sp of user stack |
107 | if (to->mode == THREAD_MODE_USER) |
108 | { |
109 | to->u_stack.head = to_regs->sp; |
110 | to->platform_options.xsaveptr = kmalloc(xsave_area_slab); |
111 | memcpy(dest: to->platform_options.xsaveptr, src: from->platform_options.xsaveptr, n: platform_info->arch_info.xsave_size); |
112 | } |
113 | |
114 | to->platform_options.fs_base = from->platform_options.fs_base; |
115 | to->platform_options.gs_base = from->platform_options.gs_base; |
116 | to->k_stack.head -= sizeof(platform_regs_t); |
117 | } |
118 | |
119 | void platform_switch_to_thread(thread_t *current, thread_t *new_thread, switch_flags_t switch_flags) |
120 | { |
121 | const switch_func_t switch_func = statement_expr(switch_func_t, { |
122 | switch (switch_flags) |
123 | { |
124 | case SWITCH_TO_NEW_USER_THREAD: retval = x86_start_user_thread; break; |
125 | case SWITCH_TO_NEW_KERNEL_THREAD: retval = x86_start_kernel_thread; break; |
126 | default: retval = x86_normal_switch_impl; break; |
127 | } |
128 | }); |
129 | |
130 | if (current) |
131 | x86_xsave_thread(thread: current); |
132 | |
133 | x86_xrstor_thread(thread: new_thread); |
134 | x86_set_fsbase(thread: new_thread); |
135 | |
136 | __atomic_store_n(¤t_cpu->thread, new_thread, __ATOMIC_SEQ_CST); |
137 | __atomic_store_n(&per_cpu(x86_cpu_descriptor)->tss.rsp0, new_thread->k_stack.top, __ATOMIC_SEQ_CST); |
138 | |
139 | ptr_t trash = 0; |
140 | ptr_t *const stack_ptr = current ? ¤t->k_stack.head : &trash; |
141 | |
142 | bool trash_lock = false; |
143 | bool *const lock = current ? ¤t->state_lock.flag : &trash_lock; |
144 | x86_context_switch_impl(old_stack: stack_ptr, new_kstack: new_thread->k_stack.head, switcher: switch_func, lock); |
145 | |
146 | // |
147 | } |
148 | |
// Load the thread's saved fs base (typically the userspace TLS pointer) into
// the FSBASE MSR via the wrfsbase instruction.
// NOTE(review): wrfsbase faults unless CR4.FSGSBASE is enabled — presumably
// done during CPU init; confirm before calling on all hardware paths.
void x86_set_fsbase(thread_t *thread)
{
    __asm__ volatile("wrfsbase %0" ::"r" (thread->platform_options.fs_base) : "memory" );
}
153 | |