// SPDX-License-Identifier: GPL-3.0-or-later

#include "mos/io/io.h"
#include "mos/mm/mm.h"
#include "mos/mm/paging/table_ops.h"
#include "mos/platform/platform.h"
#include "mos/syslog/printk.h"

#include <mos/mm/cow.h>
#include <mos/mm/mm_types.h>
#include <mos/mm/mmap.h>
#include <mos/mm/paging/paging.h>
#include <mos/mos_global.h>
#include <mos/tasks/process.h>
#include <mos/tasks/task_types.h>

/**
 * @brief Check if the mmap arguments are valid, and normalise the hint address
 *
 * @param hint_addr The hint address; set to the default mmap base if it is zero and MMAP_EXACT is not specified
 * @param mmap_flags The mmap flags
 * @return true if the arguments are valid, false otherwise
 */
static bool mmap_verify_arguments(ptr_t *hint_addr, mmap_flags_t mmap_flags)
{
    if ((*hint_addr % MOS_PAGE_SIZE) != 0)
    {
        pr_warn("hint address must be page-aligned");
        return false;
    }

    const bool shared = mmap_flags & MMAP_SHARED;   // when forked, shared between parent and child
    const bool private = mmap_flags & MMAP_PRIVATE; // when forked, make it Copy-on-Write

    if (shared == private)
    {
        pr_warn("mmap: MMAP_SHARED and MMAP_PRIVATE are mutually exclusive; exactly one must be specified");
        return false;
    }

    // always use the hint address as-is if MMAP_EXACT is specified
    if (mmap_flags & MMAP_EXACT)
        return true;

    // otherwise, if no hint address is specified, use the default mmap base
    if (*hint_addr == 0)
        *hint_addr = MOS_ADDR_USER_MMAP;

    return true;
}

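/**
 * @brief Create an anonymous, zero-initialised memory mapping
 *
 * For example, to map 4 private zeroed pages at the default mmap base:
 * @code
 * ptr_t vaddr = mmap_anonymous(ctx, 0, MMAP_PRIVATE, VM_USER | VM_READ | VM_WRITE, 4);
 * @endcode
 *
 * @param ctx The memory management context to map into
 * @param hint_addr The hint address, or 0 to use the default mmap base
 * @param flags The mmap flags, MMAP_SHARED or MMAP_PRIVATE, optionally with MMAP_EXACT
 * @param vm_flags The permission flags for the new mapping
 * @param n_pages The number of pages to map
 * @return The virtual address of the mapping, 0 on invalid arguments, or an
 *         error value (from PTR_ERR()) if page allocation fails
 */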
ptr_t mmap_anonymous(mm_context_t *ctx, ptr_t hint_addr, mmap_flags_t flags, vm_flags vm_flags, size_t n_pages)
{
    if (!mmap_verify_arguments(&hint_addr, flags))
        return 0;

    const valloc_flags valloc_flags = (flags & MMAP_EXACT) ? VALLOC_EXACT : VALLOC_DEFAULT;

    vmap_t *vmap = cow_allocate_zeroed_pages(ctx, n_pages, hint_addr, valloc_flags, vm_flags);

    if (IS_ERR(vmap))
        return PTR_ERR(vmap);

    pr_dinfo2(vmm, "allocated %zd pages at " PTR_FMT, vmap->npages, vmap->vaddr);

    const vmap_type_t type = (flags & MMAP_SHARED) ? VMAP_TYPE_SHARED : VMAP_TYPE_PRIVATE;
    vmap_finalise_init(vmap, VMAP_MMAP, type);
    return vmap->vaddr;
}

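/**
 * @brief Create a file-backed memory mapping
 *
 * @param ctx The memory management context to map into
 * @param hint_addr The hint address, or 0 to use the default mmap base
 * @param flags The mmap flags, MMAP_SHARED or MMAP_PRIVATE, optionally with MMAP_EXACT
 * @param vm_flags The permission flags for the new mapping
 * @param n_pages The number of pages to map
 * @param io The io object that backs the mapping
 * @param offset The offset into the file, must be page-aligned
 * @return The virtual address of the mapping, or 0 on failure
 */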
ptr_t mmap_file(mm_context_t *ctx, ptr_t hint_addr, mmap_flags_t flags, vm_flags vm_flags, size_t n_pages, io_t *io, off_t offset)
{
    if (!mmap_verify_arguments(&hint_addr, flags))
        return 0;

    if (offset % MOS_PAGE_SIZE != 0)
    {
        pr_warn("mmap_file: offset must be page-aligned");
        return 0;
    }

    const valloc_flags valloc_flags = (flags & MMAP_EXACT) ? VALLOC_EXACT : VALLOC_DEFAULT;
    const vmap_type_t type = (flags & MMAP_SHARED) ? VMAP_TYPE_SHARED : VMAP_TYPE_PRIVATE;

    mm_lock_ctx_pair(ctx, NULL);
    vmap_t *vmap = mm_get_free_vaddr_locked(ctx, n_pages, hint_addr, valloc_flags);
    mm_unlock_ctx_pair(ctx, NULL);

    if (IS_ERR(vmap))
    {
        pr_warn("mmap_file: no free virtual address space");
        return 0;
    }

    vmap->vmflags = vm_flags;
    vmap->type = type;

    if (!io_mmap(io, vmap, offset))
    {
        vmap_destroy(vmap);
        pr_warn("mmap_file: could not map the file: io_mmap() failed");
        return 0;
    }

    vmap_finalise_init(vmap, VMAP_FILE, type);
    return vmap->vaddr;
}

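/**
 * @brief Unmap all pages of the current process that intersect [addr, addr + size)
 *
 * @param addr The start address of the range
 * @param size The size of the range in bytes
 * @return true on success, false if no vmap contains the address or the split fails
 */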
bool munmap(ptr_t addr, size_t size)
{
    spinlock_acquire(&current_process->mm->mm_lock);
    vmap_t *const whole_map = vmap_obtain(current_process->mm, addr, NULL);
    if (unlikely(!whole_map))
    {
        spinlock_release(&current_process->mm->mm_lock);
        pr_warn("munmap: could not find the vmap");
        return false;
    }

    // unmap all pages that intersect the range, even those only partially covered
    const ptr_t range_start = ALIGN_DOWN_TO_PAGE(addr);
    const ptr_t range_end = ALIGN_UP_TO_PAGE(addr + size);

    const size_t start_pgoff = (range_start - whole_map->vaddr) / MOS_PAGE_SIZE;
    const size_t end_pgoff = (range_end - whole_map->vaddr) / MOS_PAGE_SIZE;

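    // carve the pages to be unmapped out of 'whole_map' into their own vmap,
    // which can then be destroyed on its own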
    vmap_t *const range_map = vmap_split_for_range(whole_map, start_pgoff, end_pgoff);
    if (unlikely(!range_map))
    {
        pr_warn("munmap: could not split the vmap");
        spinlock_release(&current_process->mm->mm_lock);
        spinlock_release(&whole_map->lock);
        return false;
    }

    vmap_destroy(range_map);
    spinlock_release(&current_process->mm->mm_lock);
    spinlock_release(&whole_map->lock);
    return true;
}

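/**
 * @brief Change the permission flags of the pages in [addr, addr + size)
 *
 * @param mmctx The memory management context
 * @param addr The start address, must be page-aligned
 * @param size The size of the range, rounded up to a page boundary
 * @param perm The new permission flags
 * @return true on success, false if no vmap contains 'addr' or the backing
 *         io rejects the new permissions
 */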
bool vm_protect(mm_context_t *mmctx, ptr_t addr, size_t size, vm_flags perm)
{
    MOS_ASSERT(addr % MOS_PAGE_SIZE == 0);
    size = ALIGN_UP_TO_PAGE(size);

    spinlock_acquire(&mmctx->mm_lock);
    vmap_t *const first_part = vmap_obtain(mmctx, addr, NULL);
    if (unlikely(!first_part))
    {
        spinlock_release(&mmctx->mm_lock);
        pr_warn("vm_protect: could not find the vmap");
        return false;
    }

    const size_t addr_pgoff = (addr - first_part->vaddr) / MOS_PAGE_SIZE;

    //
    //  first | second | third
    //        ^        ^
    //        |        |
    //      addr   addr + size
    //

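    // '__extension__' marks a GNU statement expression, used here so that
    // 'to_protect' can be initialised const from the split logic below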
    vmap_t *const to_protect = __extension__({
        vmap_t *vmap = first_part;
        // if 'addr_pgoff' is 0, the first part is the one we want to protect;
        // otherwise split it to get the vmap that starts at 'addr'
        if (addr_pgoff)
        {
            vmap = vmap_split(first_part, addr_pgoff);
            spinlock_release(&first_part->lock); // release the lock on the first part, we don't need it
        }

        vmap;
    });

    const size_t size_pgoff = size / MOS_PAGE_SIZE;
    if (size_pgoff < to_protect->npages)
    {
        // there is a third part beyond 'addr + size'; split it off and leave it untouched
        vmap_t *const part3 = vmap_split(to_protect, size_pgoff);
        spinlock_release(&part3->lock); // release the lock on the third part, we don't need it
    }

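    // for file-backed mappings, the backing io must accept the new permissions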
    if (to_protect->io)
    {
        if (!io_mmap_perm_check(to_protect->io, perm, to_protect->type == VMAP_TYPE_PRIVATE))
        {
            spinlock_release(&to_protect->lock); // permission denied
            spinlock_release(&mmctx->mm_lock);
            return false;
        }
    }

    const bool read_lost = to_protect->vmflags & VM_READ && !(perm & VM_READ);    // we are losing read permission
    const bool write_lost = to_protect->vmflags & VM_WRITE && !(perm & VM_WRITE); // we are losing write permission
    const bool exec_lost = to_protect->vmflags & VM_EXEC && !(perm & VM_EXEC);    // we are losing exec permission

    vm_flags mask = 0;
    if (read_lost)
    {
        mask |= VM_READ;
        pr_warn("read permission lost, this is not supported yet");
    }

    if (write_lost)
        mask |= VM_WRITE;
    if (exec_lost)
        mask |= VM_EXEC;

    // remove the lost permissions from the page tables immediately
    mm_do_mask_flags(mmctx->pgd, to_protect->vaddr, to_protect->npages, mask);

    // do not add newly-granted permissions here; let the page fault handler
    // apply them lazily, e.g. write permission is only granted when the page
    // is actually written to, so that CoW can be handled properly

    // record the new flags so the page fault handler knows what to grant
    to_protect->vmflags = perm | VM_USER;

    spinlock_release(&to_protect->lock);
    spinlock_release(&mmctx->mm_lock);
    return true;
}