// SPDX-License-Identifier: GPL-3.0-or-later

#include "mos/io/io.hpp"
#include "mos/mm/mm.hpp"
#include "mos/mm/paging/table_ops.hpp"
#include "mos/platform/platform.hpp"
#include "mos/syslog/printk.hpp"

#include <mos/mm/cow.hpp>
#include <mos/mm/mm_types.h>
#include <mos/mm/mmap.hpp>
#include <mos/mm/paging/paging.hpp>
#include <mos/mos_global.h>
#include <mos/tasks/process.hpp>
#include <mos/tasks/task_types.hpp>
/**
 * @brief Check that the mmap arguments are valid
 *
 * @param hint_addr The hint address; if zero and MMAP_EXACT is not set, it is replaced with the default mmap base
 * @param mmap_flags The mmap flags
 * @return true if the arguments are valid, false otherwise
 */
static bool mmap_verify_arguments(ptr_t *hint_addr, MMapFlags mmap_flags)
{
    if ((*hint_addr % MOS_PAGE_SIZE) != 0)
    {
        pr_warn("mmap: hint address must be page-aligned");
        return false;
    }

    const bool shared = mmap_flags & MMAP_SHARED;       // when forked, shared between parent and child
    const bool map_private = mmap_flags & MMAP_PRIVATE; // when forked, becomes Copy-on-Write

    if (shared == map_private)
    {
        pr_warn("mmap: MMAP_SHARED and MMAP_PRIVATE are mutually exclusive, and exactly one must be specified");
        return false;
    }

    // always use the hint address as-is if MMAP_EXACT is specified
    if (mmap_flags & MMAP_EXACT)
        return true;

    // otherwise, if no hint address is given, fall back to the default
    if (*hint_addr == 0)
        *hint_addr = MOS_ADDR_USER_MMAP;

    return true;
}

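/**
 * @brief Map anonymous, zero-filled pages into an address space
 *
 * @param ctx The memory management context to map into
 * @param hint_addr The hint address, or 0 to use the default mmap base
 * @param flags The mmap flags, exactly one of MMAP_SHARED or MMAP_PRIVATE
 * @param vmflags The VM permission flags for the new mapping
 * @param n_pages The number of pages to map
 * @return The virtual address of the new mapping, 0 for invalid arguments,
 *         or an error value if the allocation fails
 *
 * @code
 * // minimal usage sketch (illustrative only, not taken from a real call site)
 * VMFlags prot = VM_READ;
 * prot |= VM_WRITE;
 * const ptr_t va = mmap_anonymous(ctx, 0, MMAP_PRIVATE, prot, 4);
 * @endcode
 */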
ptr_t mmap_anonymous(MMContext *ctx, ptr_t hint_addr, MMapFlags flags, VMFlags vmflags, size_t n_pages)
{
    if (!mmap_verify_arguments(&hint_addr, flags))
        return 0;

    auto vmap = cow_allocate_zeroed_pages(ctx, n_pages, hint_addr, vmflags, flags & MMAP_EXACT);

    if (vmap.isErr())
        return vmap.getErr(); // propagate the error code as the return value

    pr_dinfo2(vmm, "allocated %zd pages at " PTR_FMT, vmap->npages, vmap->vaddr);

    const vmap_type_t type = (flags & MMAP_SHARED) ? VMAP_TYPE_SHARED : VMAP_TYPE_PRIVATE;
    vmap_finalise_init(vmap.get(), VMAP_MMAP, type);
    return vmap->vaddr;
}

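/**
 * @brief Map a file-backed region into an address space
 *
 * @param ctx The memory management context to map into
 * @param hint_addr The hint address, or 0 to use the default mmap base
 * @param flags The mmap flags, exactly one of MMAP_SHARED or MMAP_PRIVATE
 * @param vmflags The VM permission flags for the new mapping
 * @param n_pages The number of pages to map
 * @param io The IO object that backs the mapping
 * @param offset The offset into the file, must be page-aligned
 * @return The virtual address of the new mapping, or 0 on failure
 */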
ptr_t mmap_file(MMContext *ctx, ptr_t hint_addr, MMapFlags flags, VMFlags vmflags, size_t n_pages, IO *io, off_t offset)
{
    if (!mmap_verify_arguments(&hint_addr, flags))
        return 0;

    if (offset % MOS_PAGE_SIZE != 0)
    {
        pr_warn("mmap_file: offset must be page-aligned");
        return 0;
    }

    const vmap_type_t type = (flags & MMAP_SHARED) ? VMAP_TYPE_SHARED : VMAP_TYPE_PRIVATE;

    mm_lock_context_pair(ctx);
    auto vmap = mm_get_free_vaddr_locked(ctx, n_pages, hint_addr, flags & MMAP_EXACT);
    mm_unlock_context_pair(ctx);

    if (vmap.isErr())
    {
        pr_warn("mmap_file: no free virtual address space");
        return 0;
    }

    vmap->vmflags = vmflags;
    vmap->type = type;

    if (!io->map(vmap.get(), offset))
    {
        vmap_destroy(vmap.get());
        pr_warn("mmap_file: could not map the file: io->map() failed");
        return 0;
    }

    vmap_finalise_init(vmap.get(), VMAP_FILE, type);
    return vmap->vaddr;
}

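/**
 * @brief Unmap a range of pages from the current process
 *
 * @param addr An address within the mapping to unmap
 * @param size The size of the range in bytes; pages that intersect the range are unmapped in full
 * @return true on success, false if no vmap contains the address or the vmap could not be split
 */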
bool munmap(ptr_t addr, size_t size)
{
    spinlock_acquire(&current_process->mm->mm_lock);
    vmap_t *const whole_map = vmap_obtain(current_process->mm, addr); // returned locked
    if (unlikely(!whole_map))
    {
        spinlock_release(&current_process->mm->mm_lock);
        pr_warn("munmap: could not find the vmap");
        return false;
    }

    // unmap all pages that intersect the range, even those only partially covered by it
    const ptr_t range_start = ALIGN_DOWN_TO_PAGE(addr);
    const ptr_t range_end = ALIGN_UP_TO_PAGE(addr + size);

    const size_t start_pgoff = (range_start - whole_map->vaddr) / MOS_PAGE_SIZE;
    const size_t end_pgoff = (range_end - whole_map->vaddr) / MOS_PAGE_SIZE;

    vmap_t *const range_map = vmap_split_for_range(whole_map, start_pgoff, end_pgoff);
    if (unlikely(!range_map))
    {
        pr_warn("munmap: could not split the vmap");
        spinlock_release(&current_process->mm->mm_lock);
        spinlock_release(&whole_map->lock);
        return false;
    }

    vmap_destroy(range_map);
    spinlock_release(&current_process->mm->mm_lock);
    spinlock_release(&whole_map->lock);
    return true;
}

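/**
 * @brief Change the permissions of a mapped range of pages
 *
 * Permissions being dropped take effect immediately; permissions being added are
 * granted lazily by the page fault handler, so that e.g. CoW is handled correctly.
 *
 * @param mmctx The memory management context
 * @param addr The start address, must be page-aligned
 * @param size The size of the range in bytes, rounded up to whole pages
 * @param perm The new permission flags
 * @return true on success, false if the vmap cannot be found or the backing io
 *         denies the new permissions
 *
 * @code
 * // minimal usage sketch: make 4 pages at 'vaddr' (a hypothetical, page-aligned
 * // address) read-only; illustrative only, not taken from a real call site
 * vm_protect(current_process->mm, vaddr, 4 * MOS_PAGE_SIZE, VM_READ);
 * @endcode
 */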
bool vm_protect(MMContext *mmctx, ptr_t addr, size_t size, VMFlags perm)
{
    MOS_ASSERT(addr % MOS_PAGE_SIZE == 0);
    size = ALIGN_UP_TO_PAGE(size);

    spinlock_acquire(&mmctx->mm_lock);
    vmap_t *const first_part = vmap_obtain(mmctx, addr);
    if (unlikely(!first_part))
    {
        spinlock_release(&mmctx->mm_lock);
        pr_warn("vm_protect: could not find the vmap");
        return false;
    }

    const size_t addr_pgoff = (addr - first_part->vaddr) / MOS_PAGE_SIZE;

    //
    // first | second | third
    //       ^        ^
    //       |        |
    //     addr    addr + size
    //

    vmap_t *const to_protect = __extension__({
        vmap_t *vmap = first_part;
        // if 'addr_pgoff' is 0, the first part is the one we want to protect,
        // otherwise split it to get the vmap that starts at 'addr'
        if (addr_pgoff)
        {
            vmap = vmap_split(first_part, addr_pgoff);
            spinlock_release(&first_part->lock); // release the lock on the first part, we don't need it
        }

        vmap;
    });

    const size_t size_pgoff = size / MOS_PAGE_SIZE;
    if (size_pgoff < to_protect->npages)
    {
        // there is a third part beyond 'addr + size': split it off and leave it untouched
        vmap_t *const part3 = vmap_split(to_protect, size_pgoff);
        spinlock_release(&part3->lock); // release the lock on the third part, we don't need it
    }

    if (to_protect->io)
    {
        if (!to_protect->io->VerifyMMapPermissions(perm, to_protect->type == VMAP_TYPE_PRIVATE))
        {
            spinlock_release(&to_protect->lock); // permission denied by the backing io
            spinlock_release(&mmctx->mm_lock);
            return false;
        }
    }

    const bool read_lost = to_protect->vmflags.test(VM_READ) && !perm.test(VM_READ);    // if we lose read permission
    const bool write_lost = to_protect->vmflags.test(VM_WRITE) && !perm.test(VM_WRITE); // if we lose write permission
    const bool exec_lost = to_protect->vmflags.test(VM_EXEC) && !perm.test(VM_EXEC);    // if we lose exec permission

    VMFlags mask = VM_NONE;
    if (read_lost)
    {
        mask |= VM_READ;
        pr_warn("vm_protect: losing read permission is not supported yet");
    }

    if (write_lost)
        mask |= VM_WRITE;
    if (exec_lost)
        mask |= VM_EXEC;

    // remove the lost permissions immediately
    mm_do_mask_flags(mmctx->pgd, to_protect->vaddr, to_protect->npages, mask);

    // do not add new permissions immediately: let the page fault handler grant them
    // lazily, e.g. write permission is only granted when the page is actually written
    // to, so that CoW can be handled properly

    // the page fault handler does the real flags update
    to_protect->vmflags = perm | VM_USER;

    spinlock_release(&to_protect->lock);
    spinlock_release(&mmctx->mm_lock);
    return true;
}