MOS Source Code
Loading...
Searching...
No Matches
mm.c
Go to the documentation of this file.
1// SPDX-License-Identifier: GPL-3.0-or-later
2
3#include "mos/mm/mm.h"
4
6#include "mos/interrupt/ipi.h"
10#include "mos/mm/physical/pmm.h"
13#include "mos/platform/platform_defs.h"
14#include "mos/syslog/printk.h"
15#include "mos/tasks/signal.h"
17
20#include <mos/mos_global.h>
21#include <mos_stdlib.h>
22#include <mos_string.h>
23
24#if MOS_CONFIG(MOS_MM_DETAILED_MMAPS_UNHANDLED_FAULT)
25#include "mos/tasks/process.h"
26#endif
27
30
33
// Allocate one physical frame WITHOUT zeroing it; returns NULL on failure.
// NOTE(review): the function signature and the allocation statement were elided
// by the extraction; the cross-reference index identifies this as
// mm_get_free_page_raw() (mm.c:34) — confirm against the real source.
{
    if (!frame)
    {
        // running out of physical memory is treated as an emergency-level event
        pr_emerg("failed to allocate a page");
        return NULL;
    }

    return frame; // ownership of the frame passes to the caller
}
45
// Allocate one physical frame and zero it before handing it out.
// NOTE(review): the signature and the allocation statement were elided by the
// extraction; the cross-reference index names this mm_get_free_page() (mm.c:46).
{
    if (!frame)
        return NULL; // allocation failed — presumably already logged by the raw variant
    // always return zero-filled pages so stale data never leaks between users
    memzero((void *) phyframe_va(frame), MOS_PAGE_SIZE);
    return frame;
}
54
// Allocate `npages` contiguous physical frames; returns NULL on failure.
// NOTE(review): the signature and the allocation statement were elided by the
// extraction; the index names this mm_get_free_pages() (mm.c:55).
{
    if (!frame)
    {
        // NOTE(review): "%zd" pairs with ssize_t, but npages is a size_t —
        // "%zu" would be the matching specifier; confirm and fix upstream.
        pr_emerg("failed to allocate %zd pages", npages);
        return NULL;
    }

    return frame; // ownership of the frames passes to the caller
}
66
// Build a fresh user-mode address space whose kernel half shares the global
// kernel page tables (so kernel mappings are identical in every context).
// NOTE(review): the signature line was elided by extraction; the index names
// this mm_create_context() (mm.c:67).
{
    mm_context_t *mmctx = kmalloc(mm_context_cache); // slab-cache-backed allocation
    linked_list_init(&mmctx->mmaps);                 // empty, sorted-by-address vmap list

    // NOTE(review): pml_create_table() is a macro (pml_types.h:107) and appears
    // to consume the token `pml4` rather than reading the yet-uninitialised
    // variable — verify the macro before assuming this is not UB.
    pml4_t pml4 = pml_create_table(pml4);

    // map the upper half of the address space to the kernel
    for (int i = pml4_index(MOS_KERNEL_START_VADDR); i < PML4_ENTRIES; i++)
        pml4.table[i] = platform_info->kernel_mm->pgd.max.next.table[i];

    mmctx->pgd = pgd_create(pml4); // wrap the top-level table into a pgd

    return mmctx;
}
82
// Tear down every user-space page table of `mmctx` and free the context object.
// The kernel context must never be destroyed.
// NOTE(review): the signature and one statement (line 86) were elided by the
// extraction; the index names this mm_destroy_context() (mm.c:83).
{
    MOS_ASSERT(mmctx != platform_info->kernel_mm); // you can't destroy the kernel mmctx

    ptr_t zero = 0;
    // page count covering the whole user range [0, MOS_USER_END_VADDR]
    size_t userspace_npages = (MOS_USER_END_VADDR + 1) / MOS_PAGE_SIZE;
    const bool freed = pml5_destroy_range(mmctx->pgd.max, &zero, &userspace_npages);
    MOS_ASSERT_X(freed, "failed to free the entire userspace");
    kfree(mmctx);
}
94
// Lock one or two mm contexts, acquiring them in a globally consistent
// (pointer-address) order so two racing callers cannot deadlock.
// NOTE(review): the lock-acquire statements themselves (lines 98, 101-103,
// 106-108) were elided by the extraction — only the ordering skeleton remains;
// confirm against the real source.
{
    if (ctx1 == ctx2 || ctx2 == NULL)
        // (single distinct context: lock only ctx1 — statement elided)
    else if (ctx1 < ctx2)
    {
        // (lock ctx1 first, then ctx2 — statements elided)
    }
    else
    {
        // (lock ctx2 first, then ctx1 — statements elided)
    }
}
110
// Release the locks taken by mm_lock_ctx_pair(), using the same
// equality/ordering branch structure.
// NOTE(review): the unlock statements themselves (lines 114, 117-119, 122-124)
// were elided by the extraction — only the branch skeleton remains; confirm
// against the real source.
{
    if (ctx1 == ctx2 || ctx2 == NULL)
        // (single distinct context: unlock only ctx1 — statement elided)
    else if (ctx1 < ctx2)
    {
        // (unlock both, mirroring the lock order — statements elided)
    }
    else
    {
        // (unlock both, mirroring the lock order — statements elided)
    }
}
126
128{
129 mm_context_t *old_ctx = current_cpu->mm_context;
130 if (old_ctx == new_ctx)
131 return old_ctx;
132
133 platform_switch_mm(new_ctx);
134 current_cpu->mm_context = new_ctx;
135 return old_ctx;
136}
137
// Insert `vmap` into `mmctx`'s mmap list, keeping the list sorted by start
// address. The vmap must not already be attached to any list, and must either
// have no context yet or already belong to `mmctx`.
// NOTE(review): one line (140) was elided by the extraction — likely a
// lock-held assertion; confirm against the real source.
static void do_attach_vmap(mm_context_t *mmctx, vmap_t *vmap)
{
    MOS_ASSERT_X(list_is_empty(list_node(vmap)), "vmap is already attached to something");
    MOS_ASSERT(vmap->mmctx == NULL || vmap->mmctx == mmctx); // may only (re)attach to the same context

    vmap->mmctx = mmctx;

    // add to the list, sorted by address
    list_foreach(vmap_t, m, mmctx->mmaps)
    {
        if (m->vaddr > vmap->vaddr)
        {
            list_insert_before(m, vmap); // first entry above us: slot in before it
            return;
        }
    }

    list_node_append(&mmctx->mmaps, list_node(vmap)); // append at the end
}
158
// Create a vmap covering [vaddr, vaddr + npages * MOS_PAGE_SIZE) in `mmctx`,
// attach it to the context's sorted list, and return it with its spinlock
// HELD — the caller is expected to finish initialisation (content/type) and
// then release the lock (see vmap_finalise_init()).
// NOTE(review): one statement (line 163, presumably initialising the vmap's
// list node) was elided by the extraction; confirm against the real source.
vmap_t *vmap_create(mm_context_t *mmctx, ptr_t vaddr, size_t npages)
{
    MOS_ASSERT_X(mmctx != platform_info->kernel_mm, "you can't create vmaps in the kernel mmctx");
    vmap_t *map = kmalloc(vmap_cache); // slab-cache-backed allocation
    spinlock_acquire(&map->lock);      // returned locked (see above)
    map->vaddr = vaddr;
    map->npages = npages;
    do_attach_vmap(mmctx, map);
    return map;
}
170
// Unmap the region described by `vmap` and free the object. File-backed
// mappings are first offered to io_munmap(); if the io layer reports that it
// already removed the mappings, the page-table teardown here is skipped.
// NOTE(review): the signature and two lines (173, 175 — likely assertions)
// were elided by the extraction; the index names this vmap_destroy() (mm.c:171).
{
    mm_context_t *const mm = vmap->mmctx;
    if (vmap->io)
    {
        bool unmapped = false;
        if (!io_munmap(vmap->io, vmap, &unmapped))
            pr_warn("munmap: could not unmap the file: io_munmap() failed");

        if (unmapped)
            goto unmapped; // the io layer already tore down the page tables
    }
    // unmap and drop frame references (do_unref = true)
    mm_do_unmap(mm->pgd, vmap->vaddr, vmap->npages, true);

unmapped:
    list_remove(vmap); // detach from the context's sorted mmap list
    kfree(vmap);
}
191
// Find the vmap in `mmctx` that contains `vaddr`. On success the vmap is
// returned with its spinlock HELD, and *out_offset (if non-NULL) receives the
// byte offset of `vaddr` within the vmap. Returns NULL (and offset 0) when no
// vmap covers the address.
// NOTE(review): one line (194) was elided by the extraction — likely a
// lock-held assertion on the mm; confirm against the real source.
vmap_t *vmap_obtain(mm_context_t *mmctx, ptr_t vaddr, size_t *out_offset)
{
    list_foreach(vmap_t, m, mmctx->mmaps)
    {
        // half-open containment test: [vaddr, vaddr + npages * MOS_PAGE_SIZE)
        if (m->vaddr <= vaddr && vaddr < m->vaddr + m->npages * MOS_PAGE_SIZE)
        {
            spinlock_acquire(&m->lock); // returned locked (see above)
            if (out_offset)
                *out_offset = vaddr - m->vaddr;
            return m;
        }
    }

    if (out_offset)
        *out_offset = 0;
    return NULL;
}
211
// Split `first` at page offset `split`: `first` keeps the leading `split`
// pages; a newly allocated vmap covering the remainder is attached to the same
// context and returned. For file-backed maps the io is re-referenced and the
// io offset advanced past the pages kept by `first`.
// NOTE(review): one line (214) was elided by the extraction — likely a
// lock-held assertion on `first`; note also that the struct copy below copies
// the lock field, so `second` is returned in the same lock state as `first` —
// confirm the intended locking contract against the real source.
vmap_t *vmap_split(vmap_t *first, size_t split)
{
    MOS_ASSERT(split && split < first->npages); // must split strictly inside the vmap

    vmap_t *second = kmalloc(vmap_cache);
    *second = *first;                    // copy the whole structure
    linked_list_init(list_node(second)); // except for the list node

    first->npages = split; // shrink the first vmap
    second->npages -= split;
    second->vaddr += split * MOS_PAGE_SIZE;
    if (first->io)
    {
        second->io = io_ref(first->io); // ref the io again
        second->io_offset += split * MOS_PAGE_SIZE;
    }

    do_attach_vmap(first->mmctx, second); // insert the tail piece into the sorted list
    return second;
}
233
// Carve the page range [rstart_pgoff, rend_pgoff) out of `vmap`, splitting at
// either or both ends as needed, and return a vmap for it.
// NOTE(review): lines 236-240 were elided by the extraction — likely
// assertions validating the range and lock state; confirm against the source.
vmap_t *vmap_split_for_range(vmap_t *vmap, size_t rstart_pgoff, size_t rend_pgoff)
{
    // range covers the whole vmap: nothing to split
    if (rstart_pgoff == 0 && rend_pgoff == vmap->npages)
        return vmap;

    // NOTE(review): vmap_split() returns the TAIL piece, so this prefix case
    // returns the vmap covering [rend_pgoff, npages) rather than the requested
    // [0, rend_pgoff) — asymmetric with the suffix case below; verify intent.
    if (rstart_pgoff == 0)
        return vmap_split(vmap, rend_pgoff);

    // range is a suffix: one split at its start; the returned tail is the range
    if (rend_pgoff == vmap->npages)
        return vmap_split(vmap, rstart_pgoff);

    // interior range: split twice, hand back the middle piece
    vmap_t *second = vmap_split(vmap, rstart_pgoff);
    vmap_t *third = vmap_split(second, rend_pgoff - rstart_pgoff);
    spinlock_release(&third->lock); // the trailing piece is not returned; unlock it
    return second;
}
256
// Record the content and type of a freshly created vmap, then release the lock
// that vmap_create() returned held — completing its two-phase initialisation.
// NOTE(review): the signature and one line (259 — likely a lock-held
// assertion) were elided by the extraction; the index names this
// vmap_finalise_init() (mm.c:257).
{
    MOS_ASSERT_X(content != VMAP_UNKNOWN, "vmap content cannot be unknown");
    MOS_ASSERT_X(vmap->content == VMAP_UNKNOWN || vmap->content == content, "vmap is already setup");

    vmap->content = content;
    vmap->type = type;
    spinlock_release(&vmap->lock); // initialisation complete; vmap becomes usable
}
267
268void mm_copy_page(const phyframe_t *src, const phyframe_t *dst)
269{
270 memcpy((void *) phyframe_va(dst), (void *) phyframe_va(src), MOS_PAGE_SIZE);
271}
272
// Break copy-on-write for a present, written-to page: copy the faulting page
// into a fresh frame and re-point the mapping at it with the vmap's full flags
// (which restores VM_WRITE).
// NOTE(review): the signature and two lines (275, 279 — presumably a lock
// assertion and the allocation of `page`) were elided by the extraction; the
// index names this mm_resolve_cow_fault() (mm.c:273).
{
    MOS_ASSERT(info->is_write && info->is_present); // CoW resolution only applies to writes to present pages

    // fast path to handle CoW
    mm_copy_page(info->faulting_page, page);
    mm_replace_page_locked(vmap->mmctx, fault_addr, phyframe_pfn(page), vmap->vmflags);

    return VMFAULT_COMPLETE; // the mapping is now private and writable
}
285
// Terminal handler for an unrecoverable page fault: print diagnostics (only
// detailed when MOS_MM_DETAILED_UNHANDLED_FAULT is configured), release every
// lock the fault path may still hold, then deal with the offender — a thread
// is signalled (statements elided below), while a kernel-context fault with no
// current thread asserts.
// NOTE(review): several statements were elided by the extraction (lines 340,
// 344, 347, 356-357 — presumably process_dump_mmaps(), the stack/register
// dumps, and signal delivery); gaps are marked inline. Confirm against source.
static void invalid_page_fault(ptr_t fault_addr, vmap_t *faulting_vmap, vmap_t *ip_vmap, pagefault_t *info, const char *unhandled_reason)
{
    pr_emerg("unhandled page fault: %s", unhandled_reason);
#if MOS_CONFIG(MOS_MM_DETAILED_UNHANDLED_FAULT)
    // describe the access that faulted
    pr_emerg(" invalid %s mode %s %s page [" PTR_FMT "]", //
             info->is_user ? "user" : "kernel", //
             info->is_write ? "write to" : (info->is_exec ? "execute in" : "read from"), //
             info->is_present ? "present" : "non-present", //
             fault_addr //
    );

    pr_emerg(" instruction: " PTR_FMT, info->ip);
    if (ip_vmap)
    {
        // locate the instruction pointer within its vmap (file offset if io-backed)
        pr_emerg(" vmap: %pvm", (void *) ip_vmap);
        pr_emerg(" offset: 0x%zx", info->ip - ip_vmap->vaddr + (ip_vmap->io ? ip_vmap->io_offset : 0));
    }

    pr_emerg(" thread: %pt", (void *) current_thread);
    pr_emerg(" process: %pp", current_thread ? (void *) current_process : NULL);

    // heuristics for the classic failure modes
    if (fault_addr < 1 KB)
    {
        if (info->is_write)
            pr_emerg(" possible write to NULL pointer");
        else if (info->is_exec && fault_addr == 0)
            pr_emerg(" attempted to execute NULL pointer");
        else
            pr_emerg(" possible NULL pointer dereference");
    }

    if (info->is_user && fault_addr > MOS_KERNEL_START_VADDR)
        pr_emerg(" kernel address dereference");

    if (info->ip > MOS_KERNEL_START_VADDR)
        pr_emerg(" in kernel function %ps", (void *) info->ip);

    if (faulting_vmap)
    {
        // locate the faulting address within its vmap (file offset if io-backed)
        pr_emerg(" in vmap: %pvm", (void *) faulting_vmap);
        pr_emerg(" offset: 0x%zx", fault_addr - faulting_vmap->vaddr + (faulting_vmap->io ? faulting_vmap->io_offset : 0));
    }

    // diagnostics done: release everything the fault path still holds
    if (faulting_vmap)
        spinlock_release(&faulting_vmap->lock);

    if (ip_vmap)
        spinlock_release(&ip_vmap->lock);

    if (current_thread)
        spinlock_release(&current_thread->owner->mm->mm_lock);

#if MOS_CONFIG(MOS_MM_DETAILED_MMAPS_UNHANDLED_FAULT)
    if (current_thread)
        // (line 340 elided — presumably process_dump_mmaps(current_process))
#endif

    pr_info("stack trace before fault (may be unreliable):");
    // (line 344 elided — presumably platform_dump_stack(info->regs))

    pr_info("register states before fault:");
    // (line 347 elided — presumably platform_dump_regs(info->regs))
    pr_cont("\n");
#else
    MOS_UNUSED(fault_addr);
    MOS_UNUSED(info);
#endif

    if (current_thread)
    {
        // (lines 356-357 elided — presumably signal the thread and prepare
        //  the return to userspace; see signal_send_to_thread /
        //  signal_exit_to_user_prepare in the cross-reference index)
    }
    else
    {
        MOS_ASSERT(!"unhandled kernel page fault"); // no thread to kill: panic via assert
    }
}
364
// Central page-fault dispatcher: locate the vmap covering the faulting
// address, run permission checks, delegate to the vmap's on_fault handler and
// install whatever backing page it produces. Anything unhandled funnels to
// invalid_page_fault() via `unhandled_reason`.
// NOTE(review): several statements were elided by the extraction (lines 367,
// 392-393, 399, 410, 418, the switch's case labels, 475, 489-490); each gap is
// marked inline. Confirm against the real source before relying on them.
void mm_handle_fault(ptr_t fault_addr, pagefault_t *info)
{
    const char *unhandled_reason = NULL;

    pr_demph(pagefault, "%s #PF: %pt, %pp, IP=" PTR_VLFMT ", ADDR=" PTR_VLFMT, //
             info->is_user ? "user" : "kernel", //
             current ? (void *) current : NULL, //
             current ? (void *) current->owner : NULL, //
             info->ip, //
             fault_addr //
    );

    if (info->is_write && info->is_exec)
        mos_panic("Cannot write and execute at the same time"); // W^X violated at the fault level

    size_t offset = 0;
    vmap_t *fault_vmap = NULL;
    vmap_t *ip_vmap = NULL;

    if (!current_mm)
    {
        unhandled_reason = "no mm context";
        goto unhandled_fault;
    }

    mm_context_t *const mm = current_mm;
    // (lines 392-393 elided — presumably acquiring mm->mm_lock; see the
    //  matching release in invalid_page_fault())

    fault_vmap = vmap_obtain(mm, fault_addr, &offset); // returned locked on success
    if (!fault_vmap)
    {
        ip_vmap = vmap_obtain(mm, info->ip, NULL); // for diagnostics only
        unhandled_reason = "page fault in unmapped area";
        // (line 399 elided)
        goto unhandled_fault;
    }
    // reuse fault_vmap when it also covers the IP; otherwise look the IP's vmap up (and lock it) separately
    ip_vmap = MOS_IN_RANGE(info->ip, fault_vmap->vaddr, fault_vmap->vaddr + fault_vmap->npages * MOS_PAGE_SIZE) ? fault_vmap : vmap_obtain(mm, info->ip, NULL);

    MOS_ASSERT_X(fault_vmap->on_fault, "vmap %pvm has no fault handler", (void *) fault_vmap);
    const vm_flags page_flags = mm_do_get_flags(fault_vmap->mmctx->pgd, fault_addr);

    if (info->is_exec && !(fault_vmap->vmflags & VM_EXEC))
    {
        unhandled_reason = "page fault in non-executable vmap";
        // (line 410 elided)
        goto unhandled_fault;
    }
    else if (info->is_present && info->is_exec && fault_vmap->vmflags & VM_EXEC && !(page_flags & VM_EXEC))
    {
        // vmprotect has been called on this vmap to enable execution
        // we need to make sure that the page is executable
        mm_do_flag(fault_vmap->mmctx->pgd, fault_addr, 1, page_flags | VM_EXEC);
        // (line 418 elided — presumably releasing mm->mm_lock)
        spinlock_release(&fault_vmap->lock);
        if (ip_vmap)
            spinlock_release(&ip_vmap->lock);
        return;
    }

    if (info->is_write && !(fault_vmap->vmflags & VM_WRITE))
    {
        unhandled_reason = "page fault in read-only vmap";
        // (line 429 elided)
        goto unhandled_fault;
    }

    // for present pages, expose the currently mapped frame to the handler
    if (info->is_present)
        info->faulting_page = pfn_phyframe(mm_do_get_pfn(fault_vmap->mmctx->pgd, fault_addr));

    // human-readable names for the handler's verdicts (debug logging only)
    static const char *const fault_result_names[] = {
        [VMFAULT_COMPLETE] = "COMPLETE",
        [VMFAULT_COPY_BACKING_PAGE] = "COPY_BACKING_PAGE",
        [VMFAULT_MAP_BACKING_PAGE] = "MAP_BACKING_PAGE",
        [VMFAULT_MAP_BACKING_PAGE_RO] = "MAP_BACKING_PAGE_RO",
        [VMFAULT_CANNOT_HANDLE] = "CANNOT_HANDLE",
    };

    pr_dcont(pagefault, ", handler %ps", (void *) (ptr_t) fault_vmap->on_fault);
    vmfault_result_t fault_result = fault_vmap->on_fault(fault_vmap, fault_addr, info);
    pr_dcont(pagefault, " -> %s", fault_result_names[fault_result]);

    vm_flags map_flags = fault_vmap->vmflags;
    switch (fault_result)
    {
        case VMFAULT_COMPLETE: break;
        // (case label elided — from the message below this is VMFAULT_CANNOT_HANDLE)
        {
            unhandled_reason = "vmap fault handler returned VMFAULT_CANNOT_HANDLE";
            goto unhandled_fault;
        }
        // (case label elided — VMFAULT_COPY_BACKING_PAGE: copy the backing page, then map the copy)
        {
            MOS_ASSERT(info->backing_page && !IS_ERR(info->backing_page));
            const phyframe_t *page = mm_get_free_page(); // will be ref'd by mm_replace_page_locked()
            mm_copy_page(info->backing_page, page);
            info->backing_page = page;
            goto map_backing_page;
        }
        // (case label elided — VMFAULT_MAP_BACKING_PAGE_RO: map without write permission)
        {
            map_flags &= ~VM_WRITE;
            goto map_backing_page;
        }
        // (case label elided — VMFAULT_MAP_BACKING_PAGE)
        {
        map_backing_page:
            if (!info->backing_page)
            {
                unhandled_reason = "out of memory";
                // (line 475 elided)
                goto unhandled_fault;
            }

            pr_dcont(pagefault, " (backing page: " PFN_FMT ")", phyframe_pfn(info->backing_page));
            mm_replace_page_locked(fault_vmap->mmctx, fault_addr, phyframe_pfn(info->backing_page), map_flags);
            fault_result = VMFAULT_COMPLETE; // backing page installed; fault resolved
        }
    }

    MOS_ASSERT_X(fault_result == VMFAULT_COMPLETE || fault_result == VMFAULT_CANNOT_HANDLE, "invalid fault result %d", fault_result);
    // NOTE(review): when ip_vmap aliases fault_vmap, these two releases unlock
    // the same spinlock twice — verify (a guard may have been elided).
    if (ip_vmap)
        spinlock_release(&ip_vmap->lock);
    spinlock_release(&fault_vmap->lock);
    // (lines 489-490 elided — presumably releasing mm->mm_lock and/or an
    //  IPI_TYPE_INVALIDATE_TLB shootdown; see the cross-reference index)
    if (fault_result == VMFAULT_COMPLETE)
        return;

// if we get here, the fault was not handled
unhandled_fault:
    MOS_ASSERT_X(unhandled_reason, "unhandled fault with no reason");
    invalid_page_fault(fault_addr, fault_vmap, ip_vmap, info, unhandled_reason);
}
499
500// ! sysfs support
501
502static bool sys_mem_mmap(sysfs_file_t *f, vmap_t *vmap, off_t offset)
503{
504 MOS_UNUSED(f);
505 // pr_info("mem: mapping " PTR_VLFMT " to " PTR_VLFMT "\n", vmap->vaddr, offset);
506 mm_do_map(vmap->mmctx->pgd, vmap->vaddr, offset / MOS_PAGE_SIZE, vmap->npages, vmap->vmflags, false);
507 return true;
508}
509
510static bool sys_mem_munmap(sysfs_file_t *f, vmap_t *vmap, bool *unmapped)
511{
512 MOS_UNUSED(f);
513 mm_do_unmap(vmap->mmctx->pgd, vmap->vaddr, vmap->npages, false);
514 *unmapped = true;
515 return true;
516}
517
519
525
#define MOS_ASSERT_X(cond, msg,...)
Definition assert.h:15
#define MOS_ASSERT(cond)
Definition assert.h:14
#define MOS_PAGE_SIZE
Definition autoconf.h:6
long signal_send_to_thread(thread_t *target, signal_t signal)
Send a signal to a thread.
Definition signal.c:92
void signal_exit_to_user_prepare(platform_regs_t *regs)
Prepare to exit to userspace.
Definition signal.c:245
MOSAPI void linked_list_init(list_node_t *head_node)
Initialise a circular double linked list.
Definition list.c:15
MOSAPI void list_node_append(list_node_t *head, list_node_t *item)
Definition list.c:68
#define list_foreach(t, v, h)
Iterate over a list.
Definition list.h:83
#define list_node(element)
Get the ‘list_node’ of a list element. This is exactly the reverse of ‘list_entry’ above.
Definition list.h:68
#define list_insert_before(element, item)
Definition list.h:72
MOSAPI bool list_is_empty(const list_node_t *head)
Definition list.c:21
#define list_remove(element)
Definition list.h:74
phyframe_t * mm_get_free_page(void)
Definition mm.c:46
#define phyframe_va(frame)
Definition mm.h:79
mm_context_t * mm_switch_context(mm_context_t *new_ctx)
Definition mm.c:127
void vmap_finalise_init(vmap_t *vmap, vmap_content_t content, vmap_type_t type)
Finalize the initialization of a vmap object.
Definition mm.c:257
vmfault_result_t
Definition mm.h:45
vmap_type_t
Definition mm.h:30
vmap_t * vmap_obtain(mm_context_t *mmctx, ptr_t vaddr, size_t *out_offset)
Get the vmap object for a virtual address.
Definition mm.c:192
phyframe_t * mm_get_free_page_raw(void)
Definition mm.c:34
vmap_t * vmap_split(vmap_t *first, size_t split)
Split a vmap object into two, at the specified offset.
Definition mm.c:212
void mm_unlock_ctx_pair(mm_context_t *ctx1, mm_context_t *ctx2)
Definition mm.c:111
vmap_t * vmap_create(mm_context_t *mmctx, ptr_t vaddr, size_t npages)
Create a vmap object and insert it into the address space.
Definition mm.c:159
vmfault_result_t mm_resolve_cow_fault(vmap_t *vmap, ptr_t fault_addr, pagefault_t *info)
Helper function to resolve a copy-on-write fault.
Definition mm.c:273
void mm_destroy_context(mm_context_t *mmctx)
Destroy a user-mode platform-dependent page table.
Definition mm.c:83
void mm_handle_fault(ptr_t fault_addr, pagefault_t *info)
Handle a page fault.
Definition mm.c:365
phyframe_t * mm_get_free_pages(size_t npages)
Definition mm.c:55
mm_context_t * mm_create_context(void)
Create a user-mode platform-dependent page table.
Definition mm.c:67
void vmap_destroy(vmap_t *vmap)
Destroy a vmap object, and unmmap the region.
Definition mm.c:171
vmap_content_t
Definition mm.h:21
void mm_lock_ctx_pair(mm_context_t *ctx1, mm_context_t *ctx2)
Lock and unlock a pair of mm_context_t objects.
Definition mm.c:95
vmap_t * vmap_split_for_range(vmap_t *vmap, size_t rstart_pgoff, size_t rend_pgoff)
Split a vmap to get a vmap object for a range of pages.
Definition mm.c:234
@ VMFAULT_COPY_BACKING_PAGE
the caller should copy the backing page into the faulting address
Definition mm.h:49
@ VMFAULT_MAP_BACKING_PAGE
the caller should map the backing page into the faulting address
Definition mm.h:48
@ VMFAULT_COMPLETE
no further action is needed, the page is correctly mapped now
Definition mm.h:46
@ VMFAULT_CANNOT_HANDLE
the handler cannot handle this fault
Definition mm.h:50
@ VMFAULT_MAP_BACKING_PAGE_RO
the caller should map the backing page into the faulting address, and mark it non-writable
Definition mm.h:47
@ VMAP_UNKNOWN
Definition mm.h:22
void mm_replace_page_locked(mm_context_t *mmctx, ptr_t vaddr, pfn_t pfn, vm_flags flags)
Replace the mappings of a page with a new physical frame.
Definition paging.c:112
#define pfn_phyframe(pfn)
Definition pmm.h:81
#define phyframe_pfn(frame)
Definition pmm.h:80
phyframe_t * pmm_allocate_frames(size_t n_frames, pmm_allocation_flags_t flags)
Allocate n_frames of contiguous physical memory.
Definition pmm.c:92
@ PMM_ALLOC_NORMAL
allocate normal pages
Definition pmm.h:75
bool io_munmap(io_t *io, vmap_t *vmap, bool *unmapped)
Definition io.c:273
io_t * io_ref(io_t *io)
Definition io.c:73
@ IPI_TYPE_INVALIDATE_TLB
Definition ipi.h:14
void ipi_send_all(ipi_type_t type)
static sysfs_item_t sys_mem_item
Definition mm.c:518
static void mm_sysfs_init()
Definition mm.c:520
static void invalid_page_fault(ptr_t fault_addr, vmap_t *faulting_vmap, vmap_t *ip_vmap, pagefault_t *info, const char *unhandled_reason)
Definition mm.c:286
static slab_t * vmap_cache
Definition mm.c:28
static bool sys_mem_munmap(sysfs_file_t *f, vmap_t *vmap, bool *unmapped)
Definition mm.c:510
static slab_t * mm_context_cache
Definition mm.c:31
static void do_attach_vmap(mm_context_t *mmctx, vmap_t *vmap)
Definition mm.c:138
static bool sys_mem_mmap(sysfs_file_t *f, vmap_t *vmap, off_t offset)
Definition mm.c:502
void mm_copy_page(const phyframe_t *src, const phyframe_t *dst)
Definition mm.c:268
#define MOS_IN_RANGE(addr, start, end)
Definition mos_global.h:78
#define KB
Definition mos_global.h:88
#define MOS_UNUSED(x)
Definition mos_global.h:64
#define current
#define mos_panic(fmt,...)
Definition panic.h:55
static void * memcpy(void *s1, const void *s2, size_t n)
Definition pb_syshdr.h:90
#define NULL
Definition pb_syshdr.h:46
#define current_thread
Definition platform.h:30
vm_flags
Definition platform.h:40
@ VM_EXEC
Definition platform.h:43
@ VM_WRITE
Definition platform.h:42
#define current_mm
Definition platform.h:32
#define current_cpu
Definition platform.h:29
#define current_process
Definition platform.h:31
__nodiscard bool pml5_destroy_range(pml5_t pml5, ptr_t *vaddr, size_t *n_pages)
Definition pml5.c:17
#define pgd_create(top)
Definition pml_types.h:89
#define pml_create_table(x)
Definition pml_types.h:107
#define pr_warn(fmt,...)
Definition printk.h:38
#define pr_emerg(fmt,...)
Definition printk.h:39
#define pr_info(fmt,...)
Definition printk.h:35
#define pr_demph(feat, fmt,...)
Definition printk.h:29
#define pr_dcont(feat, fmt,...)
Definition printk.h:33
#define pr_cont(fmt,...)
Definition printk.h:41
void process_dump_mmaps(const process_t *process)
Definition process.c:365
#define MOS_KERNEL_START_VADDR
#define MOS_USER_END_VADDR
#define PML4_ENTRIES
mos_platform_info_t *const platform_info
void platform_switch_mm(const mm_context_t *new_mm)
void platform_dump_regs(platform_regs_t *regs)
void platform_dump_stack(platform_regs_t *regs)
#define memzero(ptr, size)
Definition rpc_client.c:40
#define MOS_INIT(_comp, _fn)
Definition setup.h:40
#define SLAB_AUTOINIT(name, var, type)
should_inline bool spinlock_is_locked(const spinlock_t *lock)
Definition spinlock.h:68
#define spinlock_acquire(lock)
Definition spinlock.h:61
#define spinlock_release(lock)
Definition spinlock.h:62
A node in a linked list.
Definition list.h:27
spinlock_t mm_lock
protects [pgd] and the [mmaps] list (the list itself, not the vmap_t objects)
Definition platform.h:81
list_head mmaps
Definition platform.h:83
pgd_t pgd
Definition platform.h:82
mm_context_t * kernel_mm
Definition platform.h:118
bool is_exec
Definition mm.h:37
ptr_t ip
the instruction pointer which caused the fault
Definition mm.h:38
bool is_present
Definition mm.h:37
platform_regs_t * regs
the registers of the moment that caused the fault
Definition mm.h:39
phyframe_t * faulting_page
the frame that contains the copy-on-write data (if any)
Definition mm.h:40
bool is_user
Definition mm.h:37
const phyframe_t * backing_page
the frame that contains the data for this page, the on_fault handler should set this
Definition mm.h:41
bool is_write
Definition mm.h:37
Definition slab.h:45
size_t size
Definition sysfs.h:34
struct sysfs_item_t::@8::@10 mem
Definition mm.h:58
vmap_content_t content
Definition mm.h:70
ptr_t vaddr
Definition mm.h:62
size_t npages
Definition mm.h:63
vmfault_handler_t on_fault
Definition mm.h:73
vm_flags vmflags
Definition mm.h:64
spinlock_t lock
Definition mm.h:60
io_t * io
Definition mm.h:67
mm_context_t * mmctx
Definition mm.h:65
off_t io_offset
Definition mm.h:68
vmap_type_t type
Definition mm.h:71
should_inline void sysfs_register_root_file(sysfs_item_t *item)
Register an entry in the sysfs root directory.
Definition sysfs.h:105
#define SYSFS_MEM_ITEM(_name, _mmap_fn, _munmap_fn)
Definition sysfs.h:51
vm_flags mm_do_get_flags(pgd_t max, ptr_t vaddr)
Definition table_ops.c:112
void mm_do_map(pgd_t top, ptr_t vaddr, pfn_t pfn, size_t n_pages, vm_flags flags, bool do_refcount)
Definition table_ops.c:23
pfn_t mm_do_get_pfn(pgd_t top, ptr_t vaddr)
Definition table_ops.c:67
void mm_do_flag(pgd_t top, ptr_t vaddr, size_t n_pages, vm_flags flags)
Definition table_ops.c:29
void mm_do_unmap(pgd_t top, ptr_t vaddr, size_t n_pages, bool do_unref)
Definition table_ops.c:35
#define PTR_VLFMT
Definition types.h:34
ssize_t off_t
Definition types.h:84
#define PFN_FMT
Definition types.h:42
#define PTR_FMT
Definition types.h:33
unsigned long ptr_t
Definition types.h:25