MOS Source Code
mm.cpp
// SPDX-License-Identifier: GPL-3.0-or-later

#include "mos/mm/mm.hpp"

#include "mos/misc/setup.hpp"
#include "mos/platform/platform_defs.hpp"
#include "mos/tasks/signal.hpp"

#include <mos/mos_global.h>
#include <mos_stdlib.hpp>
#include <mos_string.hpp>

#if MOS_CONFIG(MOS_MM_DETAILED_MMAPS_UNHANDLED_FAULT)
#include "mos/tasks/process.hpp"
#endif

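// Allocate a single physical page frame, without zeroing it; returns NULL on failure.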
phyframe_t *mm_get_free_page_raw(void)
{
    phyframe_t *frame = pmm_allocate_frames(1, PMM_ALLOC_NORMAL);
    if (!frame)
    {
        mEmerg << "failed to allocate a page";
        return NULL;
    }

    return frame;
}

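// Allocate a single physical page frame and zero-fill it.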
phyframe_t *mm_get_free_page(void)
{
    phyframe_t *frame = mm_get_free_page_raw();
    if (!frame)
        return NULL;
    memzero((void *) phyframe_va(frame), MOS_PAGE_SIZE);
    return frame;
}

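// Allocate npages contiguous physical page frames; returns NULL on failure.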
phyframe_t *mm_get_free_pages(size_t npages)
{
    phyframe_t *frame = pmm_allocate_frames(npages, PMM_ALLOC_NORMAL);
    if (!frame)
    {
        mEmerg << "failed to allocate " << npages << " pages";
        return NULL;
    }

    return frame;
}

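// Create a user-mode platform-dependent page table; the kernel upper half is shared with every context.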
MMContext *mm_create_context(void)
{
    MMContext *mmctx = mos::create<MMContext>();
    linked_list_init(&mmctx->mmaps);

    pml4_t pml4 = pml_create_table(pml4);

    // map the upper half of the address space to the kernel
    for (int i = pml4_index(MOS_KERNEL_START_VADDR); i < PML4_ENTRIES; i++)
        pml4.table[i] = platform_info->kernel_mm->pgd.max.next.table[i];

    mmctx->pgd = pgd_create(pml4);

    return mmctx;
}

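// Destroy a user-mode platform-dependent page table, freeing the entire userspace range.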
void mm_destroy_context(MMContext *mmctx)
{
    MOS_ASSERT(mmctx != platform_info->kernel_mm); // you can't destroy the kernel mmctx

    ptr_t zero = 0;
    size_t userspace_npages = (MOS_USER_END_VADDR + 1) / MOS_PAGE_SIZE;
    const bool freed = pml5_destroy_range(mmctx->pgd.max, &zero, &userspace_npages);
    MOS_ASSERT_X(freed, "failed to free the entire userspace");
    delete mmctx;
}

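// Lock a pair of MMContext objects; locks are always taken in address order, so two concurrent callers cannot deadlock.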
void mm_lock_context_pair(MMContext *ctx1_, MMContext *ctx2_)
{
    MMContext *ctx1 = ctx1_;
    MMContext *ctx2 = ctx2_;

    if (ctx1 > ctx2)
        std::swap(ctx1, ctx2);

    // ctx1 <= ctx2
    if (ctx1 == NULL || ctx1 == ctx2)
        spinlock_acquire(&ctx2->mm_lock);
    else
    {
        spinlock_acquire(&ctx1->mm_lock);
        spinlock_acquire(&ctx2->mm_lock);
    }
}

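// Unlock a pair of MMContext objects previously locked by mm_lock_context_pair().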
void mm_unlock_context_pair(MMContext *ctx1_, MMContext *ctx2_)
{
    MMContext *ctx1 = ctx1_;
    MMContext *ctx2 = ctx2_;

    if (ctx1 > ctx2)
        std::swap(ctx1, ctx2);

    // ctx1 <= ctx2
    if (ctx1 == NULL || ctx1 == ctx2)
        spinlock_release(&ctx2->mm_lock);
    else
    {
        // note that we release in reverse order
        spinlock_release(&ctx2->mm_lock);
        spinlock_release(&ctx1->mm_lock);
    }
}

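// Switch the current CPU to a new address space, returning the previously active context.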
MMContext *mm_switch_context(MMContext *new_ctx)
{
    MMContext *old_ctx = current_cpu->mm_context;
    if (old_ctx == new_ctx)
        return old_ctx;

    platform_switch_mm(new_ctx);
    current_cpu->mm_context = new_ctx;
    return old_ctx;
}

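// Attach a vmap to an MMContext, keeping the mmaps list sorted by address.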
static void do_attach_vmap(MMContext *mmctx, vmap_t *vmap)
{
    MOS_ASSERT_X(list_is_empty(list_node(vmap)), "vmap is already attached to something");
    MOS_ASSERT(vmap->mmctx == NULL || vmap->mmctx == mmctx);

    vmap->mmctx = mmctx;

    // add to the list, sorted by address
    list_foreach(vmap_t, m, mmctx->mmaps)
    {
        if (m->vaddr > vmap->vaddr)
        {
            list_insert_before(m, vmap);
            return;
        }
    }

    list_node_append(&mmctx->mmaps, list_node(vmap)); // append at the end
}

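// Create a vmap object and insert it into the address space; the returned vmap is locked.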
vmap_t *vmap_create(MMContext *mmctx, ptr_t vaddr, size_t npages)
{
    MOS_ASSERT_X(mmctx != platform_info->kernel_mm, "you can't create vmaps in the kernel mmctx");
    vmap_t *map = mos::create<vmap_t>();
    linked_list_init(list_node(map));
    spinlock_acquire(&map->lock);
    map->vaddr = vaddr;
    map->npages = npages;
    do_attach_vmap(mmctx, map);
    return map;
}

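// Destroy a vmap object, and unmap the region.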
void vmap_destroy(vmap_t *vmap)
{
    MMContext *const mm = vmap->mmctx;

    if (vmap->io)
    {
        bool unmapped = false;
        if (!vmap->io->unmap(vmap, &unmapped))
            mWarn << "munmap: could not unmap the file: io_munmap() failed";

        if (unmapped)
            goto unmapped;
    }
    mm_do_unmap(mm->pgd, vmap->vaddr, vmap->npages, true);

unmapped:
    list_remove(vmap);
    delete vmap;
}

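// Get the vmap object for a virtual address; the returned vmap is locked, and *out_offset receives the byte offset of vaddr within it.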
vmap_t *vmap_obtain(MMContext *mmctx, ptr_t vaddr, size_t *out_offset)
{
    list_foreach(vmap_t, m, mmctx->mmaps)
    {
        if (m->vaddr <= vaddr && vaddr < m->vaddr + m->npages * MOS_PAGE_SIZE)
        {
            spinlock_acquire(&m->lock);
            if (out_offset)
                *out_offset = vaddr - m->vaddr;
            return m;
        }
    }

    if (out_offset)
        *out_offset = 0;
    return NULL;
}

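// Split a vmap object into two at the given page offset; returns the second half, which inherits the first's lock state.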
vmap_t *vmap_split(vmap_t *first, size_t split)
{
    MOS_ASSERT(split && split < first->npages);

    vmap_t *second = mos::create<vmap_t>();
    *second = *first;                    // copy the whole structure
    linked_list_init(list_node(second)); // except for the list node

    first->npages = split; // shrink the first vmap
    second->npages -= split;
    second->vaddr += split * MOS_PAGE_SIZE;
    if (first->io)
    {
        second->io = first->io->ref(); // ref the io again
        second->io_offset += split * MOS_PAGE_SIZE;
    }

    do_attach_vmap(first->mmctx, second);
    return second;
}

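// Split a vmap to get a vmap object covering exactly the page range [rstart_pgoff, rend_pgoff).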
vmap_t *vmap_split_for_range(vmap_t *vmap, size_t rstart_pgoff, size_t rend_pgoff)
{
    if (rstart_pgoff == 0 && rend_pgoff == vmap->npages)
        return vmap;

    if (rstart_pgoff == 0)
        return vmap_split(vmap, rend_pgoff);

    if (rend_pgoff == vmap->npages)
        return vmap_split(vmap, rstart_pgoff);

    vmap_t *second = vmap_split(vmap, rstart_pgoff);
    vmap_t *third = vmap_split(second, rend_pgoff - rstart_pgoff);
    spinlock_release(&third->lock);
    return second;
}

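// Finalize the initialization of a vmap object, setting its content and type, then unlocking it.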
void vmap_finalise_init(vmap_t *vmap, vmap_content_t content, vmap_type_t type)
{
    MOS_ASSERT_X(content != VMAP_UNKNOWN, "vmap content cannot be unknown");
    MOS_ASSERT_X(vmap->content == VMAP_UNKNOWN || vmap->content == content, "vmap is already setup");

    vmap->content = content;
    vmap->type = type;
    spinlock_release(&vmap->lock);
}

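// Copy the contents of one physical page frame to another.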
void mm_copy_page(const phyframe_t *src, const phyframe_t *dst)
{
    memcpy((void *) phyframe_va(dst), (void *) phyframe_va(src), MOS_PAGE_SIZE);
}

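// Helper function to resolve a copy-on-write fault by copying the faulting page into a fresh frame.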
vmfault_result_t mm_resolve_cow_fault(vmap_t *vmap, ptr_t fault_addr, pagefault_t *info)
{
    MOS_ASSERT(info->is_write && info->is_present);

    // fast path to handle CoW
    const phyframe_t *page = mm_get_free_page();
    mm_copy_page(info->faulting_page, page);
    mm_replace_page_locked(vmap->mmctx, fault_addr, phyframe_pfn(page), vmap->vmflags);

    return VMFAULT_COMPLETE;
}

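// Report an unhandled page fault; a faulting userspace thread receives a fatal signal, while a kernel fault asserts.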
static void invalid_page_fault(ptr_t fault_addr, vmap_t *faulting_vmap, vmap_t *ip_vmap, pagefault_t *info, const char *unhandled_reason)
{
    mEmerg << "unhandled page fault: " << unhandled_reason;
#if MOS_CONFIG(MOS_MM_DETAILED_UNHANDLED_FAULT)
    mEmerg << " invalid "                                                                  //
           << (info->is_user ? "user" : "kernel") << " mode "                              //
           << (info->is_write ? "write to" : (info->is_exec ? "execute in" : "read from")) //
           << " " << (info->is_present ? "present" : "non-present") << " page [" << (void *) fault_addr << "]";

    mEmerg << " instruction: " << (void *) info->ip;
    if (ip_vmap)
    {
        mEmerg << " vmap: " << ip_vmap;
        mEmerg << " offset: 0x" << (info->ip - ip_vmap->vaddr + (ip_vmap->io ? ip_vmap->io_offset : 0));
    }

    mEmerg << " thread: " << current_thread;
    mEmerg << " process: " << (current_thread ? current_process : nullptr);

    if (fault_addr < 1 KB)
    {
        if (info->is_write)
            mEmerg << " possible write to NULL pointer";
        else if (info->is_exec && fault_addr == 0)
            mEmerg << " attempted to execute NULL pointer";
        else
            mEmerg << " possible NULL pointer dereference";
    }

    if (info->is_user && fault_addr > MOS_KERNEL_START_VADDR)
        mEmerg << " kernel address dereference";

    if (info->ip > MOS_KERNEL_START_VADDR)
        mEmerg << " in kernel function " << (void *) info->ip;

    if (faulting_vmap)
    {
        mEmerg << " in vmap: " << faulting_vmap;
        mEmerg << " offset: 0x" << (fault_addr - faulting_vmap->vaddr + (faulting_vmap->io ? faulting_vmap->io_offset : 0));
    }
#endif

    if (faulting_vmap)
        spinlock_release(&faulting_vmap->lock);

    if (ip_vmap)
        spinlock_release(&ip_vmap->lock);

    if (current_thread)
        spinlock_release(&current_thread->owner->mm->mm_lock);

#if MOS_CONFIG(MOS_MM_DETAILED_UNHANDLED_FAULT)
#if MOS_CONFIG(MOS_MM_DETAILED_MMAPS_UNHANDLED_FAULT)
    if (current_thread)
        process_dump_mmaps(current_thread->owner);
#endif

    mInfo << "stack trace before fault (may be unreliable):";
    platform_dump_stack(info->regs);

    mInfo << "register states before fault:";
    platform_dump_regs(info->regs);
    mCont << "\n";
#else
    MOS_UNUSED(faulting_vmap);
    MOS_UNUSED(ip_vmap);
    MOS_UNUSED(fault_addr);
    MOS_UNUSED(info);
#endif

    if (current_thread)
    {
        signal_send_to_thread(current_thread, SIGSEGV);
    }
    else
    {
        MOS_ASSERT(!"unhandled kernel page fault");
    }
}

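// Handle a page fault: locate the faulting vmap, run its fault handler, and map, copy, or reject as the handler requests.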
void mm_handle_fault(ptr_t fault_addr, pagefault_t *info)
{
    const char *unhandled_reason = NULL;

    dEmph<pagefault> << (info->is_user ? "user" : "kernel") << " #PF: "  //
                     << (current_thread ? current_thread : NULL) << ", " //
                     << (current_thread ? current_thread->owner : NULL)  //
                     << ", IP=" << info->ip                              //
                     << ", ADDR=" << fault_addr;

    if (info->is_write && info->is_exec)
        mos_panic("Cannot write and execute at the same time");

    size_t offset = 0;
    vmap_t *fault_vmap = NULL;
    vmap_t *ip_vmap = NULL;

    const auto DoUnhandledPageFault = [&]()
    {
        // if we get here, the fault was not handled
        MOS_ASSERT_X(unhandled_reason, "unhandled fault with no reason");
        invalid_page_fault(fault_addr, fault_vmap, ip_vmap, info, unhandled_reason);
    };

    if (!current_mm)
    {
        unhandled_reason = "no mm context";
        return DoUnhandledPageFault();
    }

    MMContext *const mm = current_mm;
    spinlock_acquire(&mm->mm_lock);

    fault_vmap = vmap_obtain(mm, fault_addr, &offset);
    if (!fault_vmap)
    {
        ip_vmap = vmap_obtain(mm, info->ip);
        unhandled_reason = "page fault in unmapped area";
        return DoUnhandledPageFault();
    }
    ip_vmap = MOS_IN_RANGE(info->ip, fault_vmap->vaddr, fault_vmap->vaddr + fault_vmap->npages * MOS_PAGE_SIZE) ? fault_vmap : vmap_obtain(mm, info->ip);

    MOS_ASSERT_X(fault_vmap->on_fault, "vmap %pvm has no fault handler", (void *) fault_vmap);
    const VMFlags page_flags = mm_do_get_flags(fault_vmap->mmctx->pgd, fault_addr);

    if (info->is_exec && !(fault_vmap->vmflags & VM_EXEC))
    {
        unhandled_reason = "page fault in non-executable vmap";
        return DoUnhandledPageFault();
    }
    else if (info->is_present && info->is_exec && fault_vmap->vmflags & VM_EXEC && !(page_flags & VM_EXEC))
    {
        // vmprotect has been called on this vmap to enable execution
        // we need to make sure that the page is executable
        mm_do_flag(fault_vmap->mmctx->pgd, fault_addr, 1, page_flags | VM_EXEC);
        spinlock_release(&mm->mm_lock);
        spinlock_release(&fault_vmap->lock);
        if (ip_vmap != fault_vmap && ip_vmap)
            spinlock_release(&ip_vmap->lock);
        return;
    }

    if (info->is_write && !fault_vmap->vmflags.test(VM_WRITE))
    {
        unhandled_reason = "page fault in read-only vmap";
        return DoUnhandledPageFault();
    }

    if (info->is_present)
        info->faulting_page = pfn_phyframe(mm_do_get_pfn(fault_vmap->mmctx->pgd, fault_addr));

    const auto get_fault_result = [](vmfault_result_t result)
    {
        switch (result)
        {
            case VMFAULT_COMPLETE: return "COMPLETE";
            case VMFAULT_MAP_BACKING_PAGE_RO: return "MAP_BACKING_PAGE_RO";
            case VMFAULT_MAP_BACKING_PAGE: return "MAP_BACKING_PAGE";
            case VMFAULT_COPY_BACKING_PAGE: return "COPY_BACKING_PAGE";
            case VMFAULT_CANNOT_HANDLE: return "CANNOT_HANDLE";
            default: return "UNKNOWN";
        };
    };

    dCont<pagefault> << ", handler " << (void *) (ptr_t) fault_vmap->on_fault;
    vmfault_result_t fault_result = fault_vmap->on_fault(fault_vmap, fault_addr, info);
    dCont<pagefault> << " -> " << get_fault_result(fault_result);

    VMFlags map_flags = fault_vmap->vmflags;
    switch (fault_result)
    {
        case VMFAULT_COMPLETE: break;
        case VMFAULT_CANNOT_HANDLE:
        {
            unhandled_reason = "vmap fault handler returned VMFAULT_CANNOT_HANDLE";
            return DoUnhandledPageFault();
        }
        case VMFAULT_COPY_BACKING_PAGE:
        {
            const phyframe_t *page = mm_get_free_page(); // will be ref'd by mm_replace_page_locked()
            mm_copy_page(info->backing_page, page);
            info->backing_page = page;
            goto map_backing_page;
        }
        case VMFAULT_MAP_BACKING_PAGE_RO:
        {
            map_flags.erase(VM_WRITE);
            goto map_backing_page;
        }
        case VMFAULT_MAP_BACKING_PAGE:
        {
        map_backing_page:
            if (!info->backing_page)
            {
                unhandled_reason = "out of memory";
                return DoUnhandledPageFault();
            }

            dCont<pagefault> << " (backing page: " << phyframe_pfn(info->backing_page) << ")";
            mm_replace_page_locked(fault_vmap->mmctx, fault_addr, phyframe_pfn(info->backing_page), map_flags);
            fault_result = VMFAULT_COMPLETE;
            break;
        }
    }

    MOS_ASSERT_X(fault_result == VMFAULT_COMPLETE || fault_result == VMFAULT_CANNOT_HANDLE, "invalid fault result %d", fault_result);
    if (ip_vmap)
        spinlock_release(&ip_vmap->lock);
    if (fault_vmap != ip_vmap)
        spinlock_release(&fault_vmap->lock);
    spinlock_release(&mm->mm_lock);
    ipi_send_all(IPI_TYPE_INVALIDATE_TLB);
    if (fault_result == VMFAULT_COMPLETE)
        return;

    DoUnhandledPageFault();
}

// ! sysfs support

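// sysfs handler: map a range of physical memory into the calling process.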
static bool sys_mem_mmap(sysfs_file_t *f, vmap_t *vmap, off_t offset)
{
    MOS_UNUSED(f);
    // mInfo << "mem: mapping " << vmap->vaddr << " to " << offset << "\n";
    mm_do_map(vmap->mmctx->pgd, vmap->vaddr, offset / MOS_PAGE_SIZE, vmap->npages, vmap->vmflags, false);
    return true;
}

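// sysfs handler: unmap a previously mapped range of physical memory.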
static bool sys_mem_munmap(sysfs_file_t *f, vmap_t *vmap, bool *unmapped)
{
    MOS_UNUSED(f);
    mm_do_unmap(vmap->mmctx->pgd, vmap->vaddr, vmap->npages, false);
    *unmapped = true;
    return true;
}

static sysfs_item_t sys_mem_item = SYSFS_MEM_ITEM("mem", sys_mem_mmap, sys_mem_munmap);

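// Register the "mem" sysfs item, exposing physical memory for mapping.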
static void mm_sysfs_init()
{
    sys_mem_item.mem.size = platform_info->max_pfn * MOS_PAGE_SIZE;
    sysfs_register_root_file(&sys_mem_item);
}

MOS_INIT(SYSFS, mm_sysfs_init);
Definition types.h:21