MOS Source Code
Loading...
Searching...
No Matches
mmap.cpp
Go to the documentation of this file.
1// SPDX-License-Identifier: GPL-3.0-or-later
2
3#include "mos/io/io.hpp"
4#include "mos/mm/mm.hpp"
8
9#include <mos/mm/cow.hpp>
10#include <mos/mm/mm_types.h>
11#include <mos/mm/mmap.hpp>
13#include <mos/mos_global.h>
14#include <mos/tasks/process.hpp>
16
23static bool mmap_verify_arguments(ptr_t *hint_addr, MMapFlags mmap_flags)
24{
25 if ((*hint_addr % MOS_PAGE_SIZE) != 0)
26 {
27 pr_warn("hint address must be page-aligned");
28 return false;
29 }
30 const bool shared = mmap_flags & MMAP_SHARED; // when forked, shared between parent and child
31 const bool map_private = mmap_flags & MMAP_PRIVATE; // when forked, make it Copy-On-Write
32
33 if (shared == map_private)
34 {
35 pr_warn("mmap_file: shared and private are mutually exclusive, and one of them must be specified");
36 return false;
37 }
38
39 if (mmap_flags & MMAP_EXACT)
40 {
41 // always use the hint address if MMAP_EXACT is specified
42 return true;
43 }
44 else
45 {
46 // if no hint address is specified, use the default
47 if (*hint_addr == 0)
48 *hint_addr = MOS_ADDR_USER_MMAP;
49 }
50
51 return true;
52}
53
// Map n_pages of anonymous, zero-on-demand memory into the given MMContext.
// Returns the virtual address of the new mapping, or an error/0 value on failure.
54ptr_t mmap_anonymous(MMContext *ctx, ptr_t hint_addr, MMapFlags flags, VMFlags VMFlags, size_t n_pages)
55{
// validates alignment and SHARED/PRIVATE exclusivity; may rewrite hint_addr
// to the default user mmap base when no hint is given and MMAP_EXACT is unset
56    if (!mmap_verify_arguments(&hint_addr, flags))
57        return 0;
58
// pages are not populated here: they are zero-filled on first fault (CoW machinery)
59    auto vmap = cow_allocate_zeroed_pages(ctx, n_pages, hint_addr, VMFlags, flags & MMAP_EXACT);
60
61    if (vmap.isErr())
62        return vmap.getErr();
63
64    pr_dinfo2(vmm, "allocated %zd pages at " PTR_FMT, vmap->npages, vmap->vaddr);
65
// NOTE(review): 'type' is not defined anywhere in the visible code — source line 66
// was elided by this extraction; presumably it derives a vmap_type_t
// (VMAP_TYPE_SHARED / VMAP_TYPE_PRIVATE) from 'flags'. Confirm against the full source.
67    vmap_finalise_init(vmap.get(), VMAP_MMAP, type);
68    return vmap->vaddr;
69}
70
// Map n_pages of the file behind 'io', starting at 'offset', into the given MMContext.
// Returns the virtual address of the new mapping, or 0 on any failure.
71ptr_t mmap_file(MMContext *ctx, ptr_t hint_addr, MMapFlags flags, VMFlags VMFlags, size_t n_pages, IO *io, off_t offset)
72{
// same validation as mmap_anonymous (alignment, SHARED/PRIVATE, default hint)
73    if (!mmap_verify_arguments(&hint_addr, flags))
74        return 0;
75
// file offsets must also land on a page boundary, since mapping is page-granular
76    if (offset % MOS_PAGE_SIZE != 0)
77    {
78        pr_warn("mmap_file: offset must be page-aligned");
79        return 0;
80    }
81
// NOTE(review): source lines 82-84 and 86 were elided by this extraction —
// presumably they compute 'type' (used below) and possibly take locks; confirm.
83
// reserve a free region of n_pages in the address space (honouring MMAP_EXACT)
85    auto vmap = mm_get_free_vaddr_locked(ctx, n_pages, hint_addr, flags & MMAP_EXACT);
87
88    if (vmap.isErr())
89    {
90        pr_warn("mmap_file: no free virtual address space");
91        return 0;
92    }
93
94    vmap->vmflags = VMFlags;
95    vmap->type = type;
96
// let the IO object install its pages; on failure the reserved vmap must be torn down
97    if (!io->map(vmap.get(), offset))
98    {
99        vmap_destroy(vmap.get());
100        pr_warn("mmap_file: could not map the file: io_mmap() failed");
101        return 0;
102    }
103
104    vmap_finalise_init(vmap.get(), VMAP_FILE, type);
105    return vmap->vaddr;
106}
107
108bool munmap(ptr_t addr, size_t size)
109{
110 spinlock_acquire(&current_process->mm->mm_lock);
111 vmap_t *const whole_map = vmap_obtain(current_process->mm, addr);
112 if (unlikely(!whole_map))
113 {
114 spinlock_release(&current_process->mm->mm_lock);
115 pr_warn("munmap: could not find the vmap");
116 return false;
117 }
118
119 // will unmap all pages containing the range, even if they are not fully contained
120 const ptr_t range_start = ALIGN_DOWN_TO_PAGE(addr);
121 const ptr_t range_end = ALIGN_UP_TO_PAGE(addr + size);
122
123 const size_t start_pgoff = (range_start - whole_map->vaddr) / MOS_PAGE_SIZE;
124 const size_t end_pgoff = (range_end - whole_map->vaddr) / MOS_PAGE_SIZE;
125
126 vmap_t *const range_map = vmap_split_for_range(whole_map, start_pgoff, end_pgoff);
127 if (unlikely(!range_map))
128 {
129 pr_warn("munmap: could not split the vmap");
130 spinlock_release(&current_process->mm->mm_lock);
131 spinlock_release(&whole_map->lock);
132 return false;
133 }
134
135 vmap_destroy(range_map);
136 spinlock_release(&current_process->mm->mm_lock);
137 spinlock_release(&whole_map->lock);
138 return true;
139}
140
// Change the protection of [addr, addr + size) to 'perm' (mprotect-style).
// The containing vmap is split so that exactly the requested range is affected.
// Lost permissions are revoked in the page tables immediately; gained permissions
// are installed lazily by the page fault handler (enables proper CoW on write).
141bool vm_protect(MMContext *mmctx, ptr_t addr, size_t size, VMFlags perm)
142{
143    MOS_ASSERT(addr % MOS_PAGE_SIZE == 0);
// NOTE(review): source line 144 was elided by this extraction — presumably a
// matching assertion on 'size' being page-aligned; confirm against the full source.
145
146    spinlock_acquire(&mmctx->mm_lock);
147    vmap_t *const first_part = vmap_obtain(mmctx, addr);
// NOTE(review): first_part is dereferenced without a null check; vmap_obtain can
// return nullptr for an unmapped address (see munmap above) — verify callers
// guarantee 'addr' is mapped, or this is a latent null-dereference.
148    const size_t addr_pgoff = (addr - first_part->vaddr) / MOS_PAGE_SIZE;
149
150    //
151    // first | second | third
152    //        ^                ^
153    //        |                |
154    //       addr        addr + size
155    //
156
// isolate the vmap that begins exactly at 'addr' (splitting off a leading part if needed)
157    vmap_t *const to_protect = __extension__({
158        vmap_t *vmap = first_part;
159        // if 'addr_pgoff' is 0, then the first part is the one we want to protect,
160        // otherwise we need to split it to get the vmap that starts at 'addr'
161        if (addr_pgoff)
162        {
163            vmap = vmap_split(first_part, addr_pgoff);
164            spinlock_release(&first_part->lock); // release the lock on the first part, we don't need it
165        }
166
167        vmap;
168    });
169
// if the range ends before the vmap does, split off the trailing ("third") part too
170    const size_t size_pgoff = size / MOS_PAGE_SIZE;
171    if (size_pgoff < to_protect->npages)
172    {
173        // if there is a third part
174        vmap_t *const part3 = vmap_split(to_protect, size_pgoff);
175        spinlock_release(&part3->lock); // release the lock on the third part, we don't need it
176    }
177
// file-backed mappings: the backing IO object may veto the new permissions
// (e.g. adding write to a shared mapping of a read-only file)
178    if (to_protect->io)
179    {
180        if (!to_protect->io->VerifyMMapPermissions(perm, to_protect->type == VMAP_TYPE_PRIVATE))
181        {
182            spinlock_release(&to_protect->lock); // permission denied
183            spinlock_release(&mmctx->mm_lock);
184            return false;
185        }
186    }
187
// compute which permissions are being taken away by this call
188    const bool read_lost = to_protect->vmflags.test(VM_READ) && !perm.test(VM_READ);    // if we lose read permission
189    const bool write_lost = to_protect->vmflags.test(VM_WRITE) && !perm.test(VM_WRITE); // if we lose write permission
190    const bool exec_lost = to_protect->vmflags.test(VM_EXEC) && !perm.test(VM_EXEC);    // if we lose exec permission
191
192    VMFlags mask = VM_NONE;
193    if (read_lost)
194    {
195        mask |= VM_READ;
196        pr_warn("read permission lost, this is not supported yet");
197    }
198
199    if (write_lost)
200        mask |= VM_WRITE;
201    if (exec_lost)
202        mask |= VM_EXEC;
203
204    // remove permissions immediately
205    mm_do_mask_flags(mmctx->pgd, to_protect->vaddr, to_protect->npages, mask);
206
207    // do not add permissions immediately, we will let the page fault handler do it
208    // e.g. write permission granted only when the page is written to (and proper e.g. CoW)
209    // can be done.
210
211    // let page fault handler do the real flags update
212    to_protect->vmflags = perm | VM_USER;
213
214    spinlock_release(&to_protect->lock);
215    spinlock_release(&mmctx->mm_lock);
216    return true;
217}
#define MOS_ASSERT(cond)
Definition assert.hpp:19
#define MOS_PAGE_SIZE
Definition autoconf.h:6
#define MOS_ADDR_USER_MMAP
Definition autoconf.h:2
PtrResult< vmap_t > cow_allocate_zeroed_pages(MMContext *handle, size_t npages, ptr_t vaddr, VMFlags flags, bool exact=false)
Allocate zero-on-demand pages at a specific address.
Definition cow.cpp:81
void vmap_finalise_init(vmap_t *vmap, vmap_content_t content, vmap_type_t type)
Finalize the initialization of a vmap object.
Definition mm.cpp:254
vmap_type_t
Definition mm.hpp:32
vmap_t * vmap_obtain(MMContext *mmctx, ptr_t vaddr, size_t *out_offset=nullptr)
Get the vmap object for a virtual address.
Definition mm.cpp:189
vmap_t * vmap_split(vmap_t *vmap, size_t split)
Split a vmap object into two, at the specified offset.
Definition mm.cpp:209
void mm_lock_context_pair(MMContext *ctx1, MMContext *ctx2=nullptr)
Lock and unlock a pair of MMContext objects.
Definition mm.cpp:87
void mm_unlock_context_pair(MMContext *ctx1, MMContext *ctx2=nullptr)
Definition mm.cpp:105
void vmap_destroy(vmap_t *vmap)
Destroy a vmap object, and unmmap the region.
Definition mm.cpp:168
vmap_t * vmap_split_for_range(vmap_t *vmap, size_t rstart_pgoff, size_t rend_pgoff)
Split a vmap to get a vmap object for a range of pages.
Definition mm.cpp:231
@ VMAP_TYPE_PRIVATE
Definition mm.hpp:33
@ VMAP_TYPE_SHARED
Definition mm.hpp:34
@ VMAP_MMAP
Definition mm.hpp:27
@ VMAP_FILE
Definition mm.hpp:26
PtrResult< vmap_t > mm_get_free_vaddr_locked(MMContext *mmctx, size_t n_pages, ptr_t base_vaddr, bool exact)
Gets npages unmapped free pages from a page table.
Definition paging.cpp:18
@ MMAP_PRIVATE
Definition mm_types.h:17
@ MMAP_EXACT
Definition mm_types.h:16
@ MMAP_SHARED
Definition mm_types.h:18
@ VM_READ
Definition mm_types.hpp:13
@ VM_NONE
Definition mm_types.hpp:12
@ VM_EXEC
Definition mm_types.hpp:15
@ VM_WRITE
Definition mm_types.hpp:14
@ VM_USER
Definition mm_types.hpp:17
bool munmap(ptr_t addr, size_t size)
Unmap a page from the current process's address space.
Definition mmap.cpp:108
static bool mmap_verify_arguments(ptr_t *hint_addr, MMapFlags mmap_flags)
Check if the mmap flags are valid.
Definition mmap.cpp:23
ptr_t mmap_file(MMContext *ctx, ptr_t hint_addr, MMapFlags flags, VMFlags VMFlags, size_t n_pages, IO *io, off_t offset)
Map a file into the current process's address space.
Definition mmap.cpp:71
ptr_t mmap_anonymous(MMContext *ctx, ptr_t hint_addr, MMapFlags flags, VMFlags VMFlags, size_t n_pages)
Map a page into the current process's address space.
Definition mmap.cpp:54
bool vm_protect(MMContext *mmctx, ptr_t addr, size_t size, VMFlags perm)
Change the permissions of a mapping.
Definition mmap.cpp:141
#define ALIGN_UP_TO_PAGE(addr)
Definition mos_global.h:76
#define ALIGN_DOWN_TO_PAGE(addr)
Definition mos_global.h:77
#define unlikely(x)
Definition mos_global.h:40
#define current_process
Definition platform.hpp:34
#define pr_warn(fmt,...)
Definition printk.hpp:38
#define pr_dinfo2(feat, fmt,...)
Definition printk.hpp:27
size_t size
Definition slab.cpp:32
#define spinlock_acquire(lock)
Definition spinlock.hpp:64
#define spinlock_release(lock)
Definition spinlock.hpp:65
Definition io.hpp:39
virtual bool VerifyMMapPermissions(VMFlags flags, bool is_private) final
Definition io.cpp:142
bool map(vmap_t *vmap, off_t offset)
Definition io.cpp:172
spinlock_t mm_lock
protects [pgd] and the [mmaps] list (the list itself, not the vmap_t objects)
Definition platform.hpp:63
pgd_t pgd
Definition platform.hpp:64
Definition mm.hpp:60
ptr_t vaddr
Definition mm.hpp:64
VMFlags vmflags
Definition mm.hpp:66
size_t npages
Definition mm.hpp:65
IO * io
Definition mm.hpp:69
spinlock_t lock
Definition mm.hpp:62
vmap_type_t type
Definition mm.hpp:73
void mm_do_mask_flags(pgd_t max, ptr_t vaddr, size_t n_pages, VMFlags to_remove)
Definition table_ops.cpp:51
ssize_t off_t
Definition types.h:80
#define PTR_FMT
Definition types.h:29
unsigned long ptr_t
Definition types.h:21