1// SPDX-License-Identifier: GPL-3.0-or-later
2
3#include "mos/x86/tasks/context.hpp"
4
5#include "mos/platform/platform_defs.hpp"
6#include "mos/x86/descriptors/descriptors.hpp"
7#include "mos/x86/tasks/fpu_context.hpp"
8
9#include <mos/lib/structures/stack.hpp>
10#include <mos/mos_global.h>
11#include <mos/platform/platform.hpp>
12#include <mos/syslog/printk.hpp>
13#include <mos/tasks/schedule.hpp>
14#include <mos/tasks/task_types.hpp>
15#include <mos/types.hpp>
16#include <mos/x86/cpu/cpu.hpp>
17#include <mos/x86/mm/paging_impl.hpp>
18#include <mos/x86/x86_interrupt.hpp>
19#include <mos/x86/x86_platform.hpp>
20#include <mos_stdlib.hpp>
21#include <mos_string.hpp>
22
// Signature of the routine jumped to once a context switch has swapped stacks.
typedef void (*switch_func_t)();

// Assembly implementations: resume an already-running thread, and perform the
// kernel-stack swap before jumping to `switcher` (releasing `lock` on the way).
extern "C" void x86_normal_switch_impl();
extern "C" void x86_context_switch_impl(ptr_t *old_stack, ptr_t new_kstack, switch_func_t switcher, bool *lock);
// Slab for per-thread XSAVE areas (FPU/SSE/AVX state); size configured at runtime.
mos::Slab<u8> xsave_area_slab("x86.xsave", 0);
28
29static void x86_start_kernel_thread()
30{
31 platform_regs_t *regs = platform_thread_regs(current_thread);
32 const thread_entry_t entry = (thread_entry_t) regs->ip;
33 void *const arg = (void *) regs->di;
34 entry(arg);
35 MOS_UNREACHABLE();
36}
37
38static void x86_start_user_thread()
39{
40 x86_interrupt_return_impl(regs: platform_thread_regs(current_thread));
41}
42
43static platform_regs_t *x86_setup_thread_common(Thread *thread)
44{
45 MOS_ASSERT_X(thread->platform_options.xsaveptr == NULL, "xsaveptr should be NULL");
46 thread->platform_options.xsaveptr = xsave_area_slab.create();
47 thread->k_stack.head -= sizeof(platform_regs_t);
48 platform_regs_t *regs = platform_thread_regs(thread);
49 *regs = (platform_regs_t) {};
50
51 regs->cs = thread->mode == THREAD_MODE_KERNEL ? GDT_SEGMENT_KCODE : GDT_SEGMENT_USERCODE | 3;
52 regs->ss = thread->mode == THREAD_MODE_KERNEL ? GDT_SEGMENT_KDATA : GDT_SEGMENT_USERDATA | 3;
53 regs->sp = thread->mode == THREAD_MODE_KERNEL ? thread->k_stack.top : thread->u_stack.top;
54
55 if (thread->mode == THREAD_MODE_USER)
56 {
57 regs->eflags = 0x202;
58 if (thread->owner->platform_options.iopl)
59 regs->eflags |= 0x3000;
60 }
61
62 return regs;
63}
64
65void platform_context_setup_main_thread(Thread *thread, ptr_t entry, ptr_t sp, int argc, ptr_t argv, ptr_t envp)
66{
67 platform_regs_t *regs = x86_setup_thread_common(thread);
68 regs->ip = entry;
69 regs->di = argc;
70 regs->si = argv;
71 regs->dx = envp;
72 regs->sp = sp;
73}
74
75void platform_context_cleanup(Thread *thread)
76{
77 if (thread->mode == THREAD_MODE_USER)
78 if (thread->platform_options.xsaveptr)
79 kfree(ptr: thread->platform_options.xsaveptr), thread->platform_options.xsaveptr = NULL;
80}
81
82void platform_context_setup_child_thread(Thread *thread, thread_entry_t entry, void *arg)
83{
84 platform_regs_t *regs = x86_setup_thread_common(thread);
85 regs->di = (ptr_t) arg;
86 regs->ip = (ptr_t) entry;
87
88 if (thread->mode == THREAD_MODE_KERNEL)
89 return;
90
91 MOS_ASSERT(thread->owner->mm == current_mm);
92 MOS_ASSERT(thread != thread->owner->main_thread);
93
94 regs->di = (ptr_t) arg; // argument
95 regs->sp = thread->u_stack.head; // update the stack pointer
96}
97
98void platform_context_clone(Thread *from, Thread *to)
99{
100 platform_regs_t *to_regs = platform_thread_regs(thread: to);
101 *to_regs = *platform_thread_regs(thread: from);
102 to_regs->ax = 0; // return 0 for the child
103
104 // synchronise the sp of user stack
105 if (to->mode == THREAD_MODE_USER)
106 {
107 to->u_stack.head = to_regs->sp;
108 to->platform_options.xsaveptr = xsave_area_slab.create();
109 memcpy(dest: to->platform_options.xsaveptr, src: from->platform_options.xsaveptr, n: xsave_area_slab.size());
110 }
111
112 to->platform_options.fs_base = from->platform_options.fs_base;
113 to->platform_options.gs_base = from->platform_options.gs_base;
114 to->k_stack.head -= sizeof(platform_regs_t);
115}
116
117void platform_switch_to_thread(Thread *current, Thread *new_thread, switch_flags_t switch_flags)
118{
119 const switch_func_t switch_func = [=]() -> switch_func_t
120 {
121 switch (switch_flags)
122 {
123 case SWITCH_TO_NEW_USER_THREAD: return x86_start_user_thread; break;
124 case SWITCH_TO_NEW_KERNEL_THREAD: return x86_start_kernel_thread; break;
125 default: return x86_normal_switch_impl; break;
126 }
127 }();
128
129 if (current)
130 x86_xsave_thread(thread: current);
131
132 x86_xrstor_thread(thread: new_thread);
133 x86_set_fsbase(thread: new_thread);
134
135 current_cpu->thread = new_thread;
136 __atomic_store_n(&per_cpu(x86_cpu_descriptor)->tss.rspN[0], new_thread->k_stack.top, __ATOMIC_SEQ_CST);
137
138 ptr_t trash = 0;
139 ptr_t *const stack_ptr = current ? &current->k_stack.head : &trash;
140
141 bool trash_lock = false;
142 bool *const lock = current ? &current->state_lock.flag : &trash_lock;
143 x86_context_switch_impl(old_stack: stack_ptr, new_kstack: new_thread->k_stack.head, switcher: switch_func, lock);
144}
145
// Load the thread's saved FS base into the FS base register via `wrfsbase`.
// NOTE(review): wrfsbase faults unless CR4.FSGSBASE is enabled — presumably
// set during boot; confirm against the CPU init code.
void x86_set_fsbase(Thread *thread)
{
    __asm__ volatile("wrfsbase %0" ::"r"(thread->platform_options.fs_base) : "memory");
}
150