#include "mos/platform/platform_defs.hpp"

#if MOS_CONFIG(MOS_MM_DETAILED_MMAPS_UNHANDLED_FAULT)

// allocation failure paths in mm_get_free_page() / mm_get_free_pages()
pr_emerg("failed to allocate a page");
pr_emerg("failed to allocate %zd pages", npages);

// mm_create_context(): copy the kernel's top-level entries into the new pml4
pml4.table[i] = platform_info->kernel_mm->pgd.max.next.table[i];

// mm_destroy_context(): the userspace half must be fully unmapped by now
MOS_ASSERT_X(freed, "failed to free the entire userspace");

// mm_lock_ctx_pair() / mm_unlock_ctx_pair(): the same-context and NULL
// cases need only one lock; otherwise the pair is taken in address order
if (ctx1 == ctx2 || ctx2 == NULL)
if (ctx1 == ctx2 || ctx2 == NULL)
else if (ctx1 < ctx2)
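These two comparisons are the address-ordered locking idiom of mm_lock_ctx_pair() and mm_unlock_ctx_pair(): taking the lower-addressed context's lock first prevents an ABBA deadlock when two threads lock the same pair in opposite orders. A minimal sketch of the acquire side, assuming MMContext exposes the mm_lock spinlock listed near the end of this section (not the verbatim implementation):

    void mm_lock_ctx_pair(MMContext *ctx1, MMContext *ctx2)
    {
        if (ctx1 == ctx2 || ctx2 == NULL)
            spinlock_acquire(&ctx1->mm_lock); // only one distinct context
        else if (ctx1 < ctx2) // always lock the lower address first
        {
            spinlock_acquire(&ctx1->mm_lock);
            spinlock_acquire(&ctx2->mm_lock);
        }
        else
        {
            spinlock_acquire(&ctx2->mm_lock);
            spinlock_acquire(&ctx1->mm_lock);
        }
    }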
// mm_switch_context(): nothing to do if the context is unchanged
if (old_ctx == new_ctx)

// do_attach_vmap(): keep the mmaps list sorted by base address
if (m->vaddr > vmap->vaddr)

// vmap_destroy(): file-backed regions are unmapped through their io
bool unmapped = false;
pr_warn("munmap: could not unmap the file: io_munmap() failed");

// vmap_obtain(): find the mapping containing vaddr, report the offset
if (m->vaddr <= vaddr && vaddr < m->vaddr + m->npages * MOS_PAGE_SIZE)
*out_offset = vaddr - m->vaddr;

// vmap_split_for_range(): whole-vmap, head, and tail cases
if (rstart_pgoff == 0 && rend_pgoff == vmap->npages)
if (rstart_pgoff == 0)
if (rend_pgoff == vmap->npages)
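The three rstart/rend checks above are the case analysis of vmap_split_for_range(). A hedged reconstruction, assuming vmap_split(first, split) keeps pages [0, split) in first and returns a new vmap covering the remainder:

    vmap_t *vmap_split_for_range(vmap_t *vmap, size_t rstart_pgoff, size_t rend_pgoff)
    {
        if (rstart_pgoff == 0 && rend_pgoff == vmap->npages)
            return vmap; // the range covers the whole vmap, no split needed

        if (rstart_pgoff == 0)
        {
            vmap_split(vmap, rend_pgoff); // cut off the tail we don't need
            return vmap;
        }

        if (rend_pgoff == vmap->npages)
            return vmap_split(vmap, rstart_pgoff); // the range is the tail

        vmap_t *second = vmap_split(vmap, rstart_pgoff); // peel off [rstart, end)
        vmap_split(second, rend_pgoff - rstart_pgoff);   // trim pages beyond rend
        return second;
    }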
// invalid_page_fault(): report everything we know about the fault
pr_emerg("unhandled page fault: %s", unhandled_reason);
#if MOS_CONFIG(MOS_MM_DETAILED_UNHANDLED_FAULT)
info->is_user ? "user" : "kernel",
info->is_write ? "write to" : (info->is_exec ? "execute in" : "read from"),
pr_emerg(" vmap: %pvm", (void *) ip_vmap);
pr_emerg(" offset: 0x%zx", info->ip - ip_vmap->vaddr + (ip_vmap->io ? ip_vmap->io_offset : 0));

// classify low faulting addresses as likely NULL-pointer bugs
if (fault_addr < 1 KB)
pr_emerg(" possible write to NULL pointer");
else if (info->is_exec && fault_addr == 0)
pr_emerg(" attempted to execute NULL pointer");
pr_emerg(" possible NULL pointer dereference");
pr_emerg(" kernel address dereference");
pr_emerg(" in kernel function %ps", (void *) info->ip);
pr_emerg(" in vmap: %pvm", (void *) faulting_vmap);
pr_emerg(" offset: 0x%zx", fault_addr - faulting_vmap->vaddr + (faulting_vmap->io ? faulting_vmap->io_offset : 0));
#if MOS_CONFIG(MOS_MM_DETAILED_MMAPS_UNHANDLED_FAULT)
pr_info("stack trace before fault (may be unreliable):");
pr_info("register states before fault:");

// mm_handle_fault(): classify the fault, then either resolve it or fall
// through to invalid_page_fault() with a reason string
const char *unhandled_reason = NULL;
info->is_user ? "user" : "kernel",
mos_panic("Cannot write and execute at the same time");
const auto DoUnhandledPageFault = [&]()
MOS_ASSERT_X(unhandled_reason, "unhandled fault with no reason");
unhandled_reason = "no mm context";
DoUnhandledPageFault();
unhandled_reason = "page fault in unmapped area";
DoUnhandledPageFault();
unhandled_reason = "page fault in non-executable vmap";
DoUnhandledPageFault();
unhandled_reason = "page fault in read-only vmap";
DoUnhandledPageFault();

// get_fault_result(): stringify a vmfault_result_t for debug logging
default:
    return "UNKNOWN";
pr_dcont(pagefault, " -> %s", get_fault_result(fault_result));

// dispatch on the handler's verdict
switch (fault_result)
unhandled_reason = "vmap fault handler returned VMFAULT_CANNOT_HANDLE";
DoUnhandledPageFault();
goto map_backing_page;
goto map_backing_page;
unhandled_reason = "out of memory";
DoUnhandledPageFault();
DoUnhandledPageFault();
#define MOS_ASSERT_X(cond, msg,...)
void signal_exit_to_user_prepare(platform_regs_t *regs)
Prepare to exit to userspace.
long signal_send_to_thread(Thread *target, signal_t signal)
Send a signal to a thread.
MOSAPI void linked_list_init(list_node_t *head_node)
Initialise a circular doubly-linked list.
MOSAPI void list_node_append(list_node_t *head, list_node_t *item)
#define list_foreach(t, v, h)
Iterate over a list.
#define list_node(element)
Get the 'list_node' of a list element. This is exactly the reverse of 'list_entry'.
#define list_insert_before(element, item)
MOSAPI bool list_is_empty(const list_node_t *head)
#define list_remove(element)
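These list helpers support the sorted-insertion pattern seen in the do_attach_vmap() fragment above. A hedged sketch of that insertion, assuming list_foreach takes the element type, the loop variable, and the list head, and that vmap_t embeds a list_node_t:

    static void do_attach_vmap(MMContext *mmctx, vmap_t *vmap)
    {
        list_foreach(vmap_t, m, mmctx->mmaps) // walk existing mappings in vaddr order
        {
            if (m->vaddr > vmap->vaddr) // first mapping above the new one
            {
                list_insert_before(m, vmap); // keep the list sorted by base address
                return;
            }
        }
        list_node_append(&mmctx->mmaps, list_node(vmap)); // highest address: goes last
    }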
MMContext * mm_switch_context(MMContext *new_ctx)
#define phyframe_va(frame)
void mm_lock_ctx_pair(MMContext *ctx1, MMContext *ctx2)
Lock and unlock a pair of MMContext objects.
void vmap_finalise_init(vmap_t *vmap, vmap_content_t content, vmap_type_t type)
Finalize the initialization of a vmap object.
phyframe_t * mm_get_free_page(void)
vmap_t * vmap_obtain(MMContext *mmctx, ptr_t vaddr, size_t *out_offset)
Get the vmap object for a virtual address.
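A short usage sketch for vmap_obtain(); per the fragment above, the out parameter receives vaddr - m->vaddr:

    size_t offset = 0;
    vmap_t *vmap = vmap_obtain(mmctx, fault_addr, &offset);
    if (!vmap)
        return; // no mapping covers fault_addr
    // here, offset == fault_addr - vmap->vaddr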
phyframe_t * mm_get_free_page_raw(void)
vmap_t * vmap_split(vmap_t *first, size_t split)
Split a vmap object into two, at the specified offset.
void mm_destroy_context(MMContext *mmctx)
Destroy a user-mode platform-dependent page table.
vmfault_result_t mm_resolve_cow_fault(vmap_t *vmap, ptr_t fault_addr, pagefault_t *info)
Helper function to resolve a copy-on-write fault.
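A plausible shape for mm_resolve_cow_fault(), assembled from the helpers referenced in this file; vmap->mmctx and vmap->vmflags are assumed field names, and the real implementation may differ:

    vmfault_result_t mm_resolve_cow_fault(vmap_t *vmap, ptr_t fault_addr, pagefault_t *info)
    {
        phyframe_t *page = mm_get_free_page(); // a fresh private frame
        if (!page)
            return VMFAULT_CANNOT_HANDLE;

        mm_copy_page(info->faulting_page, page); // duplicate the shared data
        mm_replace_page_locked(vmap->mmctx, fault_addr, phyframe_pfn(page), vmap->vmflags);
        return VMFAULT_COMPLETE;
    }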
void mm_unlock_ctx_pair(MMContext *ctx1, MMContext *ctx2)
MMContext * mm_create_context(void)
Create a user-mode platform-dependent page table.
void mm_handle_fault(ptr_t fault_addr, pagefault_t *info)
Handle a page fault.
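A hedged sketch of the architecture glue that feeds this entry point; the function name is hypothetical, the error-code bits are x86's, and regs->ip is an assumed field of platform_regs_t:

    void x86_page_fault_entry(platform_regs_t *regs, ptr_t cr2, u64 error_code)
    {
        pagefault_t info = {};
        info.is_user = error_code & 0x4;  // U/S bit: fault raised in user mode
        info.is_write = error_code & 0x2; // W/R bit: fault was a write
        info.is_exec = error_code & 0x10; // I/D bit: instruction fetch
        info.ip = regs->ip;
        info.regs = regs;
        mm_handle_fault(cr2, &info); // CR2 holds the faulting virtual address
    }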
phyframe_t * mm_get_free_pages(size_t npages)
vmap_t * vmap_create(MMContext *mmctx, ptr_t vaddr, size_t npages)
Create a vmap object and insert it into the address space.
void vmap_destroy(vmap_t *vmap)
Destroy a vmap object, and unmap the region.
vmap_t * vmap_split_for_range(vmap_t *vmap, size_t rstart_pgoff, size_t rend_pgoff)
Split a vmap to get a vmap object for a range of pages.
@ VMFAULT_COPY_BACKING_PAGE
the caller should copy the backing page into the faulting address
@ VMFAULT_MAP_BACKING_PAGE
the caller should map the backing page into the faulting address
@ VMFAULT_COMPLETE
no further action is needed, the page is correctly mapped now
@ VMFAULT_CANNOT_HANDLE
the handler cannot handle this fault
@ VMFAULT_MAP_BACKING_PAGE_RO
the caller should map the backing page into the faulting address, and mark it non-writable
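These values are the contract between mm_handle_fault() and a vmap's on_fault handler. A minimal anonymous-memory handler might look like this sketch; the exact vmfault_handler_t signature is assumed here:

    static vmfault_result_t anon_on_fault(vmap_t *vmap, ptr_t fault_addr, pagefault_t *info)
    {
        phyframe_t *frame = mm_get_free_page();
        if (!frame)
            return VMFAULT_CANNOT_HANDLE; // the caller reports an unhandled fault

        memzero((void *) phyframe_va(frame), MOS_PAGE_SIZE); // fresh zeroed page
        info->backing_page = frame; // the caller maps this at the faulting address
        return VMFAULT_MAP_BACKING_PAGE;
    }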
void mm_replace_page_locked(MMContext *mmctx, ptr_t vaddr, pfn_t pfn, vm_flags flags)
Replace the mappings of a page with a new physical frame.
#define pfn_phyframe(pfn)
#define phyframe_pfn(frame)
phyframe_t * pmm_allocate_frames(size_t n_frames, pmm_allocation_flags_t flags)
Allocate n_frames contiguous frames of physical memory.
@ PMM_ALLOC_NORMAL
allocate normal pages
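A brief usage sketch combining pmm_allocate_frames() with the phyframe helpers above (the count of 4 is arbitrary):

    phyframe_t *frames = pmm_allocate_frames(4, PMM_ALLOC_NORMAL);
    if (!frames)
        pr_emerg("failed to allocate %zd pages", (size_t) 4);

    memzero((void *) phyframe_va(frames), 4 * MOS_PAGE_SIZE); // zero via the direct map
    const pfn_t pfn = phyframe_pfn(frames); // physical frame number of the first frame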
bool io_munmap(io_t *io, vmap_t *vmap, bool *unmapped)
@ IPI_TYPE_INVALIDATE_TLB
void ipi_send_all(ipi_type_t type)
static sysfs_item_t sys_mem_item
static void mm_sysfs_init()
static void invalid_page_fault(ptr_t fault_addr, vmap_t *faulting_vmap, vmap_t *ip_vmap, pagefault_t *info, const char *unhandled_reason)
static void do_attach_vmap(MMContext *mmctx, vmap_t *vmap)
static bool sys_mem_munmap(sysfs_file_t *f, vmap_t *vmap, bool *unmapped)
static bool sys_mem_mmap(sysfs_file_t *f, vmap_t *vmap, off_t offset)
void mm_copy_page(const phyframe_t *src, const phyframe_t *dst)
#define MOS_IN_RANGE(addr, start, end)
T * create(Args &&...args)
#define mos_panic(fmt,...)
static void * memcpy(void *s1, const void *s2, size_t n)
__nodiscard bool pml5_destroy_range(pml5_t pml5, ptr_t *vaddr, size_t *n_pages)
#define pml_create_table(x)
#define pr_emerg(fmt,...)
#define pr_demph(feat, fmt,...)
#define pr_dcont(feat, fmt,...)
void process_dump_mmaps(const Process *process)
#define memzero(ptr, size)
#define MOS_INIT(_comp, _fn)
should_inline bool spinlock_is_locked(const spinlock_t *lock)
#define spinlock_acquire(lock)
#define spinlock_release(lock)
spinlock_t mm_lock
protects [pgd] and the [mmaps] list (the list itself, not the vmap_t objects)
ptr_t ip
the instruction pointer which caused the fault
platform_regs_t * regs
the registers of the moment that caused the fault
phyframe_t * faulting_page
the frame that contains the copy-on-write data (if any)
const phyframe_t * backing_page
the frame that contains the data for this page; the on_fault handler should set this
vmfault_handler_t on_fault
should_inline void sysfs_register_root_file(sysfs_item_t *item)
Register an entry in the sysfs root directory.
#define SYSFS_MEM_ITEM(_name, _mmap_fn, _munmap_fn)
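A sketch of how sys_mem_item and mm_sysfs_init() plausibly fit together; the string form of the name argument and the MOS_INIT component tag are assumptions:

    static sysfs_item_t sys_mem_item = SYSFS_MEM_ITEM("mem", sys_mem_mmap, sys_mem_munmap);

    static void mm_sysfs_init()
    {
        sysfs_register_root_file(&sys_mem_item); // expose the item in the sysfs root
    }

    MOS_INIT(SYSFS, mm_sysfs_init);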
vm_flags mm_do_get_flags(pgd_t max, ptr_t vaddr)
void mm_do_map(pgd_t top, ptr_t vaddr, pfn_t pfn, size_t n_pages, vm_flags flags, bool do_refcount)
pfn_t mm_do_get_pfn(pgd_t top, ptr_t vaddr)
void mm_do_flag(pgd_t top, ptr_t vaddr, size_t n_pages, vm_flags flags)
void mm_do_unmap(pgd_t top, ptr_t vaddr, size_t n_pages, bool do_unref)
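These five mm_do_* operations are the low-level page-table layer beneath the vmap API. A hedged roundtrip sketch; the pgd handle and the VM_READ/VM_WRITE flag names are assumptions:

    mm_do_map(mmctx->pgd, vaddr, pfn, 1, VM_READ | VM_WRITE, true); // map one page, refcounted
    MOS_ASSERT_X(mm_do_get_pfn(mmctx->pgd, vaddr) == pfn, "mapping did not take effect");
    mm_do_flag(mmctx->pgd, vaddr, 1, VM_READ); // drop the write permission
    ipi_send_all(IPI_TYPE_INVALIDATE_TLB);     // other CPUs must flush stale translations
    mm_do_unmap(mmctx->pgd, vaddr, 1, true);   // unmap and unreference the frame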