MOS Source Code
Loading...
Searching...
No Matches
mm.cpp
Go to the documentation of this file.
1// SPDX-License-Identifier: GPL-3.0-or-later
2
3#include "mos/mm/mm.hpp"
4
7#include "mos/misc/setup.hpp"
13#include "mos/platform/platform_defs.hpp"
14#include "mos/syslog/printk.hpp"
15#include "mos/tasks/signal.hpp"
16
19#include <mos/mos_global.h>
20#include <mos_stdlib.hpp>
21#include <mos_string.hpp>
22
23#if MOS_CONFIG(MOS_MM_DETAILED_MMAPS_UNHANDLED_FAULT)
24#include "mos/tasks/process.hpp"
25#endif
26
// mm_get_free_page_raw (per the index below: mm.cpp:27): allocate one
// physical frame WITHOUT zeroing it.
// NOTE(review): the signature line and the allocation call (presumably
// pmm_allocate_frames(1, PMM_ALLOC_NORMAL), which defines `frame`) were
// elided by the extraction of this listing — confirm against the real source.
{
    if (!frame)
    {
        // allocation failure in the physical allocator is treated as an
        // emergency, but NULL is still propagated to the caller
        pr_emerg("failed to allocate a page");
        return NULL;
    }

    return frame;
}
38
// mm_get_free_page (index: mm.cpp:39): allocate one physical frame and
// zero its contents via the frame's direct-mapped virtual address.
// NOTE(review): the signature and the allocation call (presumably
// mm_get_free_page_raw(), defining `frame`) were elided by the extraction.
{
    if (!frame)
        return NULL; // allocation failed; raw path already logged it
    memzero((void *) phyframe_va(frame), MOS_PAGE_SIZE);
    return frame;
}
47
// mm_get_free_pages (index: mm.cpp:48): allocate `npages` contiguous
// physical frames.
// NOTE(review): the signature and the allocation call (presumably
// pmm_allocate_frames(npages, ...), defining `frame`) were elided by the
// extraction of this listing.
{
    if (!frame)
    {
        pr_emerg("failed to allocate %zd pages", npages);
        return NULL;
    }

    return frame;
}
59
// mm_create_context (index: mm.cpp:60): create a user-mode MMContext whose
// top-level page table shares the kernel half with the kernel's own tables.
// NOTE(review): the signature and the allocation of `mmctx` were elided by
// the extraction of this listing.
{
    linked_list_init(&mmctx->mmaps); // new context starts with no vmaps

    // pml_create_table is a project macro; presumably it allocates and
    // initialises an empty top-level table for this variable — confirm.
    pml4_t pml4 = pml_create_table(pml4);

    // map the upper half of the address space to the kernel
    for (int i = pml4_index(MOS_KERNEL_START_VADDR); i < PML4_ENTRIES; i++)
        pml4.table[i] = platform_info->kernel_mm->pgd.max.next.table[i];

    mmctx->pgd = pgd_create(pml4);

    return mmctx;
}
75
// mm_destroy_context (index: mm.cpp:76): tear down all userspace mappings
// of a user MMContext, then free the context object itself.
// NOTE(review): the signature line and at least one statement (likely a
// lock acquisition) were elided by the extraction of this listing.
{
    MOS_ASSERT(mmctx != platform_info->kernel_mm); // you can't destroy the kernel mmctx

    // destroy every page table entry in [0, MOS_USER_END_VADDR]
    ptr_t zero = 0;
    size_t userspace_npages = (MOS_USER_END_VADDR + 1) / MOS_PAGE_SIZE;
    const bool freed = pml5_destroy_range(mmctx->pgd.max, &zero, &userspace_npages);
    MOS_ASSERT_X(freed, "failed to free the entire userspace");
    delete mmctx;
}
87
// mm_lock_ctx_pair (index: mm.cpp:88): lock one or two MMContext objects.
// NOTE(review): the signature and the spinlock_acquire() calls in every
// branch were elided by the extraction. Judging by the branch structure,
// locks are presumably taken in pointer order so that any two contexts are
// always locked in the same global order (deadlock avoidance) — confirm.
{
    if (ctx1 == ctx2 || ctx2 == NULL)
    // (single-context case: body elided)
    else if (ctx1 < ctx2)
    {
        // (lock ctx1 then ctx2: bodies elided)
    }
    else
    {
        // (lock ctx2 then ctx1: bodies elided)
    }
}
103
// mm_unlock_ctx_pair (index: mm.cpp:104): unlock one or two MMContext
// objects; mirror of mm_lock_ctx_pair.
// NOTE(review): the signature and the spinlock_release() calls in every
// branch were elided by the extraction of this listing — confirm ordering
// against the real source.
{
    if (ctx1 == ctx2 || ctx2 == NULL)
    // (single-context case: body elided)
    else if (ctx1 < ctx2)
    {
        // (release in reverse of the lock order: bodies elided)
    }
    else
    {
        // (release in reverse of the lock order: bodies elided)
    }
}
119
// mm_switch_context (index: mm.cpp:120; signature elided by extraction):
// make `new_ctx` the active address space on the current CPU and return
// the previously-active context.
{
    MMContext *old_ctx = current_cpu->mm_context;
    if (old_ctx == new_ctx)
        return old_ctx; // already active — avoid a redundant TLB/CR3 reload

    platform_switch_mm(new_ctx); // arch-specific page-table switch
    current_cpu->mm_context = new_ctx;
    return old_ctx;
}
130
// Insert a vmap into an MMContext's mmap list, keeping the list sorted by
// ascending virtual address. The vmap must not already be on any list.
// NOTE(review): one line was elided here by the extraction (presumably an
// assertion that mmctx->mm_lock is held) — confirm against the real source.
static void do_attach_vmap(MMContext *mmctx, vmap_t *vmap)
{
    MOS_ASSERT_X(list_is_empty(list_node(vmap)), "vmap is already attached to something");
    // a vmap may be pre-bound to this context, but never to a different one
    MOS_ASSERT(vmap->mmctx == NULL || vmap->mmctx == mmctx);

    vmap->mmctx = mmctx;

    // add to the list, sorted by address
    list_foreach(vmap_t, m, mmctx->mmaps)
    {
        if (m->vaddr > vmap->vaddr)
        {
            list_insert_before(m, vmap);
            return;
        }
    }

    list_node_append(&mmctx->mmaps, list_node(vmap)); // append at the end
}
151
// Create a vmap covering [vaddr, vaddr + npages * MOS_PAGE_SIZE) in the
// given user context and attach it to the context's sorted mmap list.
// The vmap is returned with its lock HELD; the caller must finalise and
// release it (see vmap_finalise_init).
vmap_t *vmap_create(MMContext *mmctx, ptr_t vaddr, size_t npages)
{
    MOS_ASSERT_X(mmctx != platform_info->kernel_mm, "you can't create vmaps in the kernel mmctx");
    // NOTE(review): the allocation of `map` (and possibly a context-lock
    // call) was elided by the extraction of this listing — confirm.
    spinlock_acquire(&map->lock); // hand the new vmap back locked
    map->vaddr = vaddr;
    map->npages = npages;
    do_attach_vmap(mmctx, map);
    return map;
}
163
// vmap_destroy (index: mm.cpp:164; signature elided by extraction): unmap
// the region described by `vmap`, detach it from its context's mmap list,
// and free the object.
// NOTE(review): at least one elided line likely asserts the vmap lock is
// held — confirm against the real source.
{
    MMContext *const mm = vmap->mmctx;
    if (vmap->io)
    {
        // file-backed mapping: let the io layer unmap; it reports through
        // `unmapped` whether it removed the page-table entries itself
        bool unmapped = false;
        if (!io_munmap(vmap->io, vmap, &unmapped))
            pr_warn("munmap: could not unmap the file: io_munmap() failed");

        if (unmapped)
            goto unmapped; // io layer already dropped the mappings
    }
    // anonymous mapping (or io left it mapped): drop page-table entries and
    // unreference the frames (do_unref = true)
    mm_do_unmap(mm->pgd, vmap->vaddr, vmap->npages, true);

unmapped:
    list_remove(vmap);
    delete vmap;
}
184
185vmap_t *vmap_obtain(MMContext *mmctx, ptr_t vaddr, size_t *out_offset)
186{
188
189 list_foreach(vmap_t, m, mmctx->mmaps)
190 {
191 if (m->vaddr <= vaddr && vaddr < m->vaddr + m->npages * MOS_PAGE_SIZE)
192 {
193 spinlock_acquire(&m->lock);
194 if (out_offset)
195 *out_offset = vaddr - m->vaddr;
196 return m;
197 }
198 }
199
200 if (out_offset)
201 *out_offset = 0;
202 return NULL;
203}
204
205vmap_t *vmap_split(vmap_t *first, size_t split)
206{
208 MOS_ASSERT(split && split < first->npages);
209
210 vmap_t *second = mos::create<vmap_t>();
211 *second = *first; // copy the whole structure
212 linked_list_init(list_node(second)); // except for the list node
213
214 first->npages = split; // shrink the first vmap
215 second->npages -= split;
216 second->vaddr += split * MOS_PAGE_SIZE;
217 if (first->io)
218 {
219 second->io = io_ref(first->io); // ref the io again
220 second->io_offset += split * MOS_PAGE_SIZE;
221 }
222
223 do_attach_vmap(first->mmctx, second);
224 return second;
225}
226
227vmap_t *vmap_split_for_range(vmap_t *vmap, size_t rstart_pgoff, size_t rend_pgoff)
228{
230
234
235 if (rstart_pgoff == 0 && rend_pgoff == vmap->npages)
236 return vmap;
237
238 if (rstart_pgoff == 0)
239 return vmap_split(vmap, rend_pgoff);
240
241 if (rend_pgoff == vmap->npages)
242 return vmap_split(vmap, rstart_pgoff);
243
244 vmap_t *second = vmap_split(vmap, rstart_pgoff);
245 vmap_t *third = vmap_split(second, rend_pgoff - rstart_pgoff);
246 spinlock_release(&third->lock);
247 return second;
248}
249
// vmap_finalise_init (index: mm.cpp:250; signature elided by extraction):
// record the content/type of a freshly-created vmap and release the lock
// that vmap_create handed back held.
{
    MOS_ASSERT_X(content != VMAP_UNKNOWN, "vmap content cannot be unknown");
    // a vmap may only be finalised once (or re-finalised with the same content)
    MOS_ASSERT_X(vmap->content == VMAP_UNKNOWN || vmap->content == content, "vmap is already setup");

    vmap->content = content;
    vmap->type = type;
    spinlock_release(&vmap->lock);
}
260
261void mm_copy_page(const phyframe_t *src, const phyframe_t *dst)
262{
263 memcpy((void *) phyframe_va(dst), (void *) phyframe_va(src), MOS_PAGE_SIZE);
264}
265
// mm_resolve_cow_fault (index: mm.cpp:266; signature elided by extraction):
// resolve a copy-on-write fault by copying the faulting page into a fresh
// frame and replacing the mapping with the vmap's full flags.
// NOTE(review): the allocation of `page` (presumably mm_get_free_page())
// was elided by the extraction of this listing — confirm.
{
    // CoW only makes sense for a write to an already-present page
    MOS_ASSERT(info->is_write && info->is_present);

    // fast path to handle CoW
    mm_copy_page(info->faulting_page, page);
    mm_replace_page_locked(vmap->mmctx, fault_addr, phyframe_pfn(page), vmap->vmflags);

    return VMFAULT_COMPLETE;
}
278
// Report an unhandled page fault in detail, release every lock this fault
// path may hold, and terminate the faulting thread — or assert-fail when
// the fault happened with no current thread (i.e. pure kernel context).
// NOTE(review): several lines were elided by the extraction of this
// listing (stack/register dump calls, process_dump_mmaps, and the actual
// thread-termination statements) — confirm against the real source.
static void invalid_page_fault(ptr_t fault_addr, vmap_t *faulting_vmap, vmap_t *ip_vmap, pagefault_t *info, const char *unhandled_reason)
{
    pr_emerg("unhandled page fault: %s", unhandled_reason);
#if MOS_CONFIG(MOS_MM_DETAILED_UNHANDLED_FAULT)
    // describe the access: who (user/kernel), what (read/write/exec), where
    pr_emerg(" invalid %s mode %s %s page [" PTR_FMT "]", //
    info->is_user ? "user" : "kernel", //
    info->is_write ? "write to" : (info->is_exec ? "execute in" : "read from"), //
    info->is_present ? "present" : "non-present", //
    fault_addr //
    );

    pr_emerg(" instruction: " PTR_FMT, info->ip);
    if (ip_vmap)
    {
        // show which vmap the faulting instruction lives in, and its file
        // offset when the vmap is io-backed
        pr_emerg(" vmap: %pvm", (void *) ip_vmap);
        pr_emerg(" offset: 0x%zx", info->ip - ip_vmap->vaddr + (ip_vmap->io ? ip_vmap->io_offset : 0));
    }

    pr_emerg(" thread: %pt", current_thread);
    pr_emerg(" process: %pp", current_thread ? current_process : nullptr);

    // heuristics: faults near address 0 usually mean a NULL-pointer bug
    if (fault_addr < 1 KB)
    {
        if (info->is_write)
            pr_emerg(" possible write to NULL pointer");
        else if (info->is_exec && fault_addr == 0)
            pr_emerg(" attempted to execute NULL pointer");
        else
            pr_emerg(" possible NULL pointer dereference");
    }

    if (info->is_user && fault_addr > MOS_KERNEL_START_VADDR)
        pr_emerg(" kernel address dereference");

    if (info->ip > MOS_KERNEL_START_VADDR)
        pr_emerg(" in kernel function %ps", (void *) info->ip);

    if (faulting_vmap)
    {
        pr_emerg(" in vmap: %pvm", (void *) faulting_vmap);
        pr_emerg(" offset: 0x%zx", fault_addr - faulting_vmap->vaddr + (faulting_vmap->io ? faulting_vmap->io_offset : 0));
    }

    // this path does not return to the normal fault flow, so drop every
    // lock the caller may have taken
    if (faulting_vmap)
        spinlock_release(&faulting_vmap->lock);

    if (ip_vmap)
        spinlock_release(&ip_vmap->lock);

    if (current_thread)
        spinlock_release(&current_thread->owner->mm->mm_lock);

#if MOS_CONFIG(MOS_MM_DETAILED_MMAPS_UNHANDLED_FAULT)
    if (current_thread)
    // (mmap dump call elided by extraction)
#endif

    pr_info("stack trace before fault (may be unreliable):");
    // (stack dump call elided by extraction)

    pr_info("register states before fault:");
    // (register dump call elided by extraction)
    pr_cont("\n");
#else
    MOS_UNUSED(fault_addr);
    MOS_UNUSED(info);
#endif

    if (current_thread)
    {
        // (thread termination — presumably a fatal signal — elided by extraction)
    }
    else
    {
        // a kernel-context fault with no thread is unrecoverable
        MOS_ASSERT(!"unhandled kernel page fault");
    }
}
357
// Central page-fault handler: locate the vmap covering the fault address,
// validate the access against its flags, dispatch to the vmap's on_fault
// handler, and carry out the action the handler requests.
// NOTE(review): several lines were elided by the extraction of this
// listing — most importantly the `case` labels of the switch below and a
// few lock/unlock statements — confirm against the real source.
void mm_handle_fault(ptr_t fault_addr, pagefault_t *info)
{
    const char *unhandled_reason = NULL;

    // (one argument line of this debug print was elided by extraction)
    pr_demph(pagefault, "%s #PF: %pt, %pp, IP=" PTR_VLFMT ", ADDR=" PTR_VLFMT, //
    info->is_user ? "user" : "kernel", //
    current_thread ? current_thread->owner : NULL, //
    info->ip, //
    fault_addr //
    );

    // W^X: a single access cannot be both a write and an instruction fetch
    if (info->is_write && info->is_exec)
        mos_panic("Cannot write and execute at the same time");

    size_t offset = 0;
    vmap_t *fault_vmap = NULL;
    vmap_t *ip_vmap = NULL;

    // shared bail-out path: report whatever is in unhandled_reason, passing
    // along any vmaps we hold so their locks get released
    const auto DoUnhandledPageFault = [&]()
    {
        // if we get here, the fault was not handled
        MOS_ASSERT_X(unhandled_reason, "unhandled fault with no reason");
        invalid_page_fault(fault_addr, fault_vmap, ip_vmap, info, unhandled_reason);
    };

    if (!current_mm)
    {
        unhandled_reason = "no mm context";
        DoUnhandledPageFault();
        return;
    }

    MMContext *const mm = current_mm;

    fault_vmap = vmap_obtain(mm, fault_addr, &offset); // locked on success
    if (!fault_vmap)
    {
        // still look up the IP's vmap so the report can name it
        ip_vmap = vmap_obtain(mm, info->ip, NULL);
        unhandled_reason = "page fault in unmapped area";
        DoUnhandledPageFault();
        return;
    }
    // reuse fault_vmap when the IP falls inside it, to avoid double-locking
    ip_vmap = MOS_IN_RANGE(info->ip, fault_vmap->vaddr, fault_vmap->vaddr + fault_vmap->npages * MOS_PAGE_SIZE) ? fault_vmap : vmap_obtain(mm, info->ip, NULL);

    MOS_ASSERT_X(fault_vmap->on_fault, "vmap %pvm has no fault handler", (void *) fault_vmap);
    const vm_flags page_flags = mm_do_get_flags(fault_vmap->mmctx->pgd, fault_addr);

    if (info->is_exec && !(fault_vmap->vmflags & VM_EXEC))
    {
        unhandled_reason = "page fault in non-executable vmap";
        DoUnhandledPageFault();
        return;
    }
    else if (info->is_present && info->is_exec && fault_vmap->vmflags & VM_EXEC && !(page_flags & VM_EXEC))
    {
        // vmprotect has been called on this vmap to enable execution
        // we need to make sure that the page is executable
        mm_do_flag(fault_vmap->mmctx->pgd, fault_addr, 1, page_flags | VM_EXEC);
        spinlock_release(&fault_vmap->lock);
        if (ip_vmap)
            spinlock_release(&ip_vmap->lock);
        return;
    }

    if (info->is_write && !(fault_vmap->vmflags & VM_WRITE))
    {
        unhandled_reason = "page fault in read-only vmap";
        DoUnhandledPageFault();
        return;
    }

    // for present-page faults, let the handler see the current frame (CoW source)
    if (info->is_present)
        info->faulting_page = pfn_phyframe(mm_do_get_pfn(fault_vmap->mmctx->pgd, fault_addr));

    // debug-print helper: name of the handler's verdict
    const auto get_fault_result = [](vmfault_result_t result)
    {
        switch (result)
        {
            case VMFAULT_COMPLETE: return "COMPLETE";
            case VMFAULT_MAP_BACKING_PAGE_RO: return "MAP_BACKING_PAGE_RO";
            case VMFAULT_MAP_BACKING_PAGE: return "MAP_BACKING_PAGE";
            case VMFAULT_COPY_BACKING_PAGE: return "COPY_BACKING_PAGE";
            case VMFAULT_CANNOT_HANDLE: return "CANNOT_HANDLE";
            default: return "UNKNOWN";
        };
    };

    pr_dcont(pagefault, ", handler %ps", (void *) (ptr_t) fault_vmap->on_fault);
    vmfault_result_t fault_result = fault_vmap->on_fault(fault_vmap, fault_addr, info);
    pr_dcont(pagefault, " -> %s", get_fault_result(fault_result));

    vm_flags map_flags = fault_vmap->vmflags;
    switch (fault_result)
    {
        case VMFAULT_COMPLETE: break;
        // NOTE(review): the case labels of the following arms were elided
        // by the extraction; judging by the bodies they are, in order:
        // VMFAULT_CANNOT_HANDLE, VMFAULT_COPY_BACKING_PAGE,
        // VMFAULT_MAP_BACKING_PAGE_RO, VMFAULT_MAP_BACKING_PAGE.
        {
            unhandled_reason = "vmap fault handler returned VMFAULT_CANNOT_HANDLE";
            DoUnhandledPageFault();
            return;
        }
        {
            // copy the handler-provided backing page into a private frame
            const phyframe_t *page = mm_get_free_page(); // will be ref'd by mm_replace_page_locked()
            mm_copy_page(info->backing_page, page);
            info->backing_page = page;
            goto map_backing_page;
        }
        {
            // map read-only: strip VM_WRITE so a later write re-faults (CoW)
            map_flags &= ~VM_WRITE;
            goto map_backing_page;
        }
        {
        map_backing_page:
            if (!info->backing_page)
            {
                unhandled_reason = "out of memory";
                DoUnhandledPageFault();
                return;
            }

            pr_dcont(pagefault, " (backing page: " PFN_FMT ")", phyframe_pfn(info->backing_page));
            mm_replace_page_locked(fault_vmap->mmctx, fault_addr, phyframe_pfn(info->backing_page), map_flags);
            fault_result = VMFAULT_COMPLETE;
        }
    }

    MOS_ASSERT_X(fault_result == VMFAULT_COMPLETE || fault_result == VMFAULT_CANNOT_HANDLE, "invalid fault result %d", fault_result);
    if (ip_vmap)
        spinlock_release(&ip_vmap->lock);
    spinlock_release(&fault_vmap->lock);
    // (one or two elided lines here, possibly an IPI/TLB shootdown)
    if (fault_result == VMFAULT_COMPLETE)
        return;

    DoUnhandledPageFault();
}
506
507// ! sysfs support
508
509static bool sys_mem_mmap(sysfs_file_t *f, vmap_t *vmap, off_t offset)
510{
511 MOS_UNUSED(f);
512 // pr_info("mem: mapping " PTR_VLFMT " to " PTR_VLFMT "\n", vmap->vaddr, offset);
513 mm_do_map(vmap->mmctx->pgd, vmap->vaddr, offset / MOS_PAGE_SIZE, vmap->npages, vmap->vmflags, false);
514 return true;
515}
516
517static bool sys_mem_munmap(sysfs_file_t *f, vmap_t *vmap, bool *unmapped)
518{
519 MOS_UNUSED(f);
520 mm_do_unmap(vmap->mmctx->pgd, vmap->vaddr, vmap->npages, false);
521 *unmapped = true;
522 return true;
523}
524
526
// Initialise the /sys mem item: record total physical memory size.
// NOTE(review): the registration call (presumably
// sysfs_register_root_file(&sys_mem_item)) and the MOS_INIT hook were
// elided by the extraction of this listing — confirm.
static void mm_sysfs_init()
{
    sys_mem_item.mem.size = platform_info->max_pfn * MOS_PAGE_SIZE;
}
532
#define MOS_ASSERT_X(cond, msg,...)
Definition assert.hpp:15
#define MOS_ASSERT(cond)
Definition assert.hpp:14
#define MOS_PAGE_SIZE
Definition autoconf.h:6
void signal_exit_to_user_prepare(platform_regs_t *regs)
Prepare to exit to userspace.
Definition signal.cpp:240
long signal_send_to_thread(Thread *target, signal_t signal)
Send a signal to a thread.
Definition signal.cpp:87
MOSAPI void linked_list_init(list_node_t *head_node)
Initialise a circular double linked list.
Definition list.cpp:15
MOSAPI void list_node_append(list_node_t *head, list_node_t *item)
Definition list.cpp:68
#define list_foreach(t, v, h)
Iterate over a list.
Definition list.hpp:89
#define list_node(element)
Get the ‘list_node’ of a list element. This is exactly the reverse of ‘list_entry’ above.
Definition list.hpp:74
#define list_insert_before(element, item)
Definition list.hpp:78
MOSAPI bool list_is_empty(const list_node_t *head)
Definition list.cpp:21
#define list_remove(element)
Definition list.hpp:80
MMContext * mm_switch_context(MMContext *new_ctx)
Definition mm.cpp:120
#define phyframe_va(frame)
Definition mm.hpp:80
void mm_lock_ctx_pair(MMContext *ctx1, MMContext *ctx2)
Lock and unlock a pair of MMContext objects.
Definition mm.cpp:88
void vmap_finalise_init(vmap_t *vmap, vmap_content_t content, vmap_type_t type)
Finalize the initialization of a vmap object.
Definition mm.cpp:250
vmfault_result_t
Definition mm.hpp:46
vmap_type_t
Definition mm.hpp:31
phyframe_t * mm_get_free_page(void)
Definition mm.cpp:39
vmap_t * vmap_obtain(MMContext *mmctx, ptr_t vaddr, size_t *out_offset)
Get the vmap object for a virtual address.
Definition mm.cpp:185
phyframe_t * mm_get_free_page_raw(void)
Definition mm.cpp:27
vmap_t * vmap_split(vmap_t *first, size_t split)
Split a vmap object into two, at the specified offset.
Definition mm.cpp:205
void mm_destroy_context(MMContext *mmctx)
Destroy a user-mode platform-dependent page table.
Definition mm.cpp:76
vmfault_result_t mm_resolve_cow_fault(vmap_t *vmap, ptr_t fault_addr, pagefault_t *info)
Helper function to resolve a copy-on-write fault.
Definition mm.cpp:266
void mm_unlock_ctx_pair(MMContext *ctx1, MMContext *ctx2)
Definition mm.cpp:104
MMContext * mm_create_context(void)
Create a user-mode platform-dependent page table.
Definition mm.cpp:60
void mm_handle_fault(ptr_t fault_addr, pagefault_t *info)
Handle a page fault.
Definition mm.cpp:358
phyframe_t * mm_get_free_pages(size_t npages)
Definition mm.cpp:48
vmap_t * vmap_create(MMContext *mmctx, ptr_t vaddr, size_t npages)
Create a vmap object and insert it into the address space.
Definition mm.cpp:152
void vmap_destroy(vmap_t *vmap)
Destroy a vmap object, and unmmap the region.
Definition mm.cpp:164
vmap_content_t
Definition mm.hpp:22
vmap_t * vmap_split_for_range(vmap_t *vmap, size_t rstart_pgoff, size_t rend_pgoff)
Split a vmap to get a vmap object for a range of pages.
Definition mm.cpp:227
@ VMFAULT_COPY_BACKING_PAGE
the caller should copy the backing page into the faulting address
Definition mm.hpp:50
@ VMFAULT_MAP_BACKING_PAGE
the caller should map the backing page into the faulting address
Definition mm.hpp:49
@ VMFAULT_COMPLETE
no further action is needed, the page is correctly mapped now
Definition mm.hpp:47
@ VMFAULT_CANNOT_HANDLE
the handler cannot handle this fault
Definition mm.hpp:51
@ VMFAULT_MAP_BACKING_PAGE_RO
the caller should map the backing page into the faulting address, and mark it non-writable
Definition mm.hpp:48
@ VMAP_UNKNOWN
Definition mm.hpp:23
void mm_replace_page_locked(MMContext *mmctx, ptr_t vaddr, pfn_t pfn, vm_flags flags)
Replace the mappings of a page with a new physical frame.
Definition paging.cpp:113
#define pfn_phyframe(pfn)
Definition pmm.hpp:74
#define phyframe_pfn(frame)
Definition pmm.hpp:73
phyframe_t * pmm_allocate_frames(size_t n_frames, pmm_allocation_flags_t flags)
Allocate n_frames of contiguous physical memory.
Definition pmm.cpp:92
@ PMM_ALLOC_NORMAL
allocate normal pages
Definition pmm.hpp:68
bool io_munmap(io_t *io, vmap_t *vmap, bool *unmapped)
Definition io.cpp:274
io_t * io_ref(io_t *io)
Definition io.cpp:74
@ IPI_TYPE_INVALIDATE_TLB
Definition ipi.hpp:15
void ipi_send_all(ipi_type_t type)
static sysfs_item_t sys_mem_item
Definition mm.cpp:525
static void mm_sysfs_init()
Definition mm.cpp:527
static void invalid_page_fault(ptr_t fault_addr, vmap_t *faulting_vmap, vmap_t *ip_vmap, pagefault_t *info, const char *unhandled_reason)
Definition mm.cpp:279
static void do_attach_vmap(MMContext *mmctx, vmap_t *vmap)
Definition mm.cpp:131
static bool sys_mem_munmap(sysfs_file_t *f, vmap_t *vmap, bool *unmapped)
Definition mm.cpp:517
static bool sys_mem_mmap(sysfs_file_t *f, vmap_t *vmap, off_t offset)
Definition mm.cpp:509
void mm_copy_page(const phyframe_t *src, const phyframe_t *dst)
Definition mm.cpp:261
#define MOS_IN_RANGE(addr, start, end)
Definition mos_global.h:79
#define KB
Definition mos_global.h:98
#define MOS_UNUSED(x)
Definition mos_global.h:65
T * create(Args &&...args)
Definition allocator.hpp:10
#define mos_panic(fmt,...)
Definition panic.hpp:51
static void * memcpy(void *s1, const void *s2, size_t n)
Definition pb_syshdr.h:90
#define NULL
Definition pb_syshdr.h:46
#define current_thread
Definition platform.hpp:32
#define current_mm
Definition platform.hpp:34
vm_flags
Definition platform.hpp:42
@ VM_EXEC
Definition platform.hpp:46
@ VM_WRITE
Definition platform.hpp:45
#define current_cpu
Definition platform.hpp:31
#define current_process
Definition platform.hpp:33
__nodiscard bool pml5_destroy_range(pml5_t pml5, ptr_t *vaddr, size_t *n_pages)
Definition pml5.cpp:17
#define pgd_create(top)
Definition pml_types.hpp:89
#define pml_create_table(x)
#define pr_warn(fmt,...)
Definition printk.hpp:38
#define pr_emerg(fmt,...)
Definition printk.hpp:39
#define pr_info(fmt,...)
Definition printk.hpp:35
#define pr_demph(feat, fmt,...)
Definition printk.hpp:29
#define pr_dcont(feat, fmt,...)
Definition printk.hpp:33
#define pr_cont(fmt,...)
Definition printk.hpp:41
void process_dump_mmaps(const Process *process)
Definition process.cpp:368
#define MOS_KERNEL_START_VADDR
#define MOS_USER_END_VADDR
#define PML4_ENTRIES
mos_platform_info_t *const platform_info
void platform_dump_regs(platform_regs_t *regs)
void platform_switch_mm(const MMContext *new_mm)
void platform_dump_stack(platform_regs_t *regs)
#define memzero(ptr, size)
#define MOS_INIT(_comp, _fn)
Definition setup.hpp:38
should_inline bool spinlock_is_locked(const spinlock_t *lock)
Definition spinlock.hpp:71
#define spinlock_acquire(lock)
Definition spinlock.hpp:64
#define spinlock_release(lock)
Definition spinlock.hpp:65
spinlock_t mm_lock
protects [pgd] and the [mmaps] list (the list itself, not the vmap_t objects)
Definition platform.hpp:86
list_head mmaps
Definition platform.hpp:88
pgd_t pgd
Definition platform.hpp:87
bool is_exec
Definition mm.hpp:38
ptr_t ip
the instruction pointer which caused the fault
Definition mm.hpp:39
bool is_present
Definition mm.hpp:38
platform_regs_t * regs
the registers of the moment that caused the fault
Definition mm.hpp:40
phyframe_t * faulting_page
the frame that contains the copy-on-write data (if any)
Definition mm.hpp:41
bool is_user
Definition mm.hpp:38
const phyframe_t * backing_page
the frame that contains the data for this page, the on_fault handler should set this
Definition mm.hpp:42
bool is_write
Definition mm.hpp:38
Definition mm.hpp:59
vmap_content_t content
Definition mm.hpp:71
ptr_t vaddr
Definition mm.hpp:63
size_t npages
Definition mm.hpp:64
vmfault_handler_t on_fault
Definition mm.hpp:74
vm_flags vmflags
Definition mm.hpp:65
spinlock_t lock
Definition mm.hpp:61
io_t * io
Definition mm.hpp:68
MMContext * mmctx
Definition mm.hpp:66
off_t io_offset
Definition mm.hpp:69
vmap_type_t type
Definition mm.hpp:72
should_inline void sysfs_register_root_file(sysfs_item_t *item)
Register an entry in the sysfs root directory.
Definition sysfs.hpp:94
#define SYSFS_MEM_ITEM(_name, _mmap_fn, _munmap_fn)
Definition sysfs.hpp:45
vm_flags mm_do_get_flags(pgd_t max, ptr_t vaddr)
void mm_do_map(pgd_t top, ptr_t vaddr, pfn_t pfn, size_t n_pages, vm_flags flags, bool do_refcount)
Definition table_ops.cpp:23
pfn_t mm_do_get_pfn(pgd_t top, ptr_t vaddr)
Definition table_ops.cpp:67
void mm_do_flag(pgd_t top, ptr_t vaddr, size_t n_pages, vm_flags flags)
Definition table_ops.cpp:29
void mm_do_unmap(pgd_t top, ptr_t vaddr, size_t n_pages, bool do_unref)
Definition table_ops.cpp:35
#define PTR_VLFMT
Definition types.h:30
ssize_t off_t
Definition types.h:80
#define PFN_FMT
Definition types.h:38
#define PTR_FMT
Definition types.h:29
unsigned long ptr_t
Definition types.h:21