#include "mos/platform/platform_defs.h"

#if MOS_CONFIG(MOS_MM_DETAILED_MMAPS_UNHANDLED_FAULT)

pr_emerg("failed to allocate a page");

pr_emerg("failed to allocate %zd pages", npages);

MOS_ASSERT_X(freed, "failed to free the entire userspace");

if (ctx1 == ctx2 || ctx2 == NULL)

if (ctx1 == ctx2 || ctx2 == NULL)
else if (ctx1 < ctx2)

if (old_ctx == new_ctx)

if (m->vaddr > vmap->vaddr)

bool unmapped = false;

pr_warn("munmap: could not unmap the file: io_munmap() failed");

if (m->vaddr <= vaddr && vaddr < m->vaddr + m->npages * MOS_PAGE_SIZE)
    *out_offset = vaddr - m->vaddr;

if (rstart_pgoff == 0 && rend_pgoff == vmap->npages)

if (rstart_pgoff == 0)

if (rend_pgoff == vmap->npages)

pr_emerg("unhandled page fault: %s", unhandled_reason);
#if MOS_CONFIG(MOS_MM_DETAILED_UNHANDLED_FAULT)
info->is_user ? "user" : "kernel",
info->is_write ? "write to" : (info->is_exec ? "execute in" : "read from"),

pr_emerg(" vmap: %pvm", (void *) ip_vmap);
pr_emerg(" offset: 0x%zx", info->ip - ip_vmap->vaddr + (ip_vmap->io ? ip_vmap->io_offset : 0));

if (fault_addr < 1 KB)
    pr_emerg(" possible write to NULL pointer");
else if (info->is_exec && fault_addr == 0)
    pr_emerg(" attempted to execute NULL pointer");
pr_emerg(" possible NULL pointer dereference");

pr_emerg(" kernel address dereference");
pr_emerg(" in kernel function %ps", (void *) info->ip);

pr_emerg(" in vmap: %pvm", (void *) faulting_vmap);
pr_emerg(" offset: 0x%zx", fault_addr - faulting_vmap->vaddr + (faulting_vmap->io ? faulting_vmap->io_offset : 0));

#if MOS_CONFIG(MOS_MM_DETAILED_MMAPS_UNHANDLED_FAULT)

pr_info("stack trace before fault (may be unreliable):");
pr_info("register states before fault:");

const char *unhandled_reason = NULL;

info->is_user ? "user" : "kernel",

mos_panic("Cannot write and execute at the same time");

unhandled_reason = "no mm context";
goto unhandled_fault;

unhandled_reason = "page fault in unmapped area";
goto unhandled_fault;

unhandled_reason = "page fault in non-executable vmap";
goto unhandled_fault;

unhandled_reason = "page fault in read-only vmap";
goto unhandled_fault;

static const char *const fault_result_names[] = {

pr_dcont(pagefault, " -> %s", fault_result_names[fault_result]);

switch (fault_result)

unhandled_reason = "vmap fault handler returned VMFAULT_CANNOT_HANDLE";
goto unhandled_fault;

goto map_backing_page;

map_flags &= ~VM_WRITE;
goto map_backing_page;

unhandled_reason = "out of memory";
goto unhandled_fault;

MOS_ASSERT_X(unhandled_reason, "unhandled fault with no reason");
#define MOS_ASSERT_X(cond, msg,...)
long signal_send_to_thread(thread_t *target, signal_t signal)
Send a signal to a thread.
void signal_exit_to_user_prepare(platform_regs_t *regs)
Prepare to exit to userspace.
MOSAPI void linked_list_init(list_node_t *head_node)
Initialise a circular doubly-linked list.
MOSAPI void list_node_append(list_node_t *head, list_node_t *item)
#define list_foreach(t, v, h)
Iterate over a list.
#define list_node(element)
Get the ‘list_node’ of a list element. This is exactly the reverse of ‘list_entry’ above.
#define list_insert_before(element, item)
MOSAPI bool list_is_empty(const list_node_t *head)
#define list_remove(element)
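The list macros above describe the circular, intrusive doubly-linked list used throughout the kernel: each element embeds a list_node_t and list_node/list_entry convert between the two. The following standalone sketch models that API; the container_of and list_entry definitions here are simplified stand-ins for illustration, not the kernel's exact macros:

    #include <stddef.h>
    #include <stdio.h>

    typedef struct list_node { struct list_node *prev, *next; } list_node_t;

    typedef struct { int value; list_node_t node; } item_t; // element embeds its list node

    // simplified stand-ins for the kernel macros (assumptions, not the real definitions)
    #define container_of(ptr, type, member) ((type *) ((char *) (ptr) - offsetof(type, member)))
    #define list_entry(node, type) container_of(node, type, node)
    #define list_foreach(t, v, h) for (t *v = list_entry((h)->next, t); &v->node != (h); v = list_entry(v->node.next, t))

    static void linked_list_init(list_node_t *head) { head->prev = head->next = head; }

    static void list_node_append(list_node_t *head, list_node_t *item)
    {
        item->prev = head->prev, item->next = head;
        head->prev->next = item, head->prev = item;
    }

    int main(void)
    {
        list_node_t head;
        linked_list_init(&head); // circular: an empty list's head points at itself

        item_t a = { .value = 1 }, b = { .value = 2 };
        list_node_append(&head, &a.node);
        list_node_append(&head, &b.node);

        list_foreach(item_t, it, &head) // visit every element in insertion order
            printf("%d\n", it->value);
    }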
phyframe_t * mm_get_free_page(void)
#define phyframe_va(frame)
mm_context_t * mm_switch_context(mm_context_t *new_ctx)
void vmap_finalise_init(vmap_t *vmap, vmap_content_t content, vmap_type_t type)
Finalize the initialization of a vmap object.
vmap_t * vmap_obtain(mm_context_t *mmctx, ptr_t vaddr, size_t *out_offset)
Get the vmap object for a virtual address.
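vmap_obtain resolves a virtual address to the mapping that contains it, optionally reporting the byte offset into that mapping; the containment test is visible in the source fragment above (m->vaddr <= vaddr && vaddr < m->vaddr + m->npages * MOS_PAGE_SIZE). A self-contained model of the lookup, with a toy vmap_t that holds only a base address and a page count:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t ptr_t;
    #define PAGE_SIZE 4096u // stand-in for MOS_PAGE_SIZE

    typedef struct { ptr_t vaddr; size_t npages; } vmap_t; // toy vmap: base + page count

    // Toy model of the lookup: scan the mappings for the one containing vaddr
    // and report the byte offset into that mapping, as vmap_obtain does.
    static vmap_t *vmap_obtain_model(vmap_t *maps, size_t n, ptr_t vaddr, size_t *out_offset)
    {
        for (size_t i = 0; i < n; i++)
        {
            vmap_t *m = &maps[i];
            if (m->vaddr <= vaddr && vaddr < m->vaddr + m->npages * PAGE_SIZE)
            {
                if (out_offset)
                    *out_offset = vaddr - m->vaddr; // offset within the vmap
                return m;
            }
        }
        return NULL; // the address lies in an unmapped hole
    }

    int main(void)
    {
        vmap_t maps[] = { { 0x1000, 4 }, { 0x10000, 2 } };
        size_t offset;
        vmap_t *m = vmap_obtain_model(maps, 2, 0x2345, &offset);
        if (m)
            printf("found vmap at %#zx, offset %#zx\n", (size_t) m->vaddr, offset);
    }

The real function additionally locks the mm context before walking the list, which this sketch omits.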
phyframe_t * mm_get_free_page_raw(void)
vmap_t * vmap_split(vmap_t *first, size_t split)
Split a vmap object into two, at the specified offset.
void mm_unlock_ctx_pair(mm_context_t *ctx1, mm_context_t *ctx2)
vmap_t * vmap_create(mm_context_t *mmctx, ptr_t vaddr, size_t npages)
Create a vmap object and insert it into the address space.
vmfault_result_t mm_resolve_cow_fault(vmap_t *vmap, ptr_t fault_addr, pagefault_t *info)
Helper function to resolve a copy-on-write fault.
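mm_resolve_cow_fault embodies the usual copy-on-write decision: a page shared by several owners must be copied before it can become writable, while a page with a single remaining owner can simply be made writable in place. A toy userspace model of that decision (the real helper operates on phyframe_t frames and updates page tables, which this sketch omits):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    typedef struct { int refcount; unsigned char data[PAGE_SIZE]; } page_t; // toy frame with inline data

    // If the faulting page is shared, copy it into a fresh frame and return the
    // copy; if we are the last owner, the original can be made writable as-is.
    static page_t *resolve_cow_model(page_t *faulting_page)
    {
        if (faulting_page->refcount == 1)
            return faulting_page; // sole owner: just flip the mapping back to writable

        page_t *copy = malloc(sizeof(page_t));
        memcpy(copy->data, faulting_page->data, PAGE_SIZE); // private copy of the data
        copy->refcount = 1;
        faulting_page->refcount--; // one fewer sharer of the original
        return copy;
    }

    int main(void)
    {
        page_t shared = { .refcount = 2, .data = "hello" };
        page_t *mine = resolve_cow_model(&shared);
        printf("copied: %s (orig refcount now %d)\n", (char *) mine->data, shared.refcount);
    }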
void mm_destroy_context(mm_context_t *mmctx)
Destroy a user-mode platform-dependent page table.
void mm_handle_fault(ptr_t fault_addr, pagefault_t *info)
Handle a page fault.
phyframe_t * mm_get_free_pages(size_t npages)
mm_context_t * mm_create_context(void)
Create a user-mode platform-dependent page table.
void vmap_destroy(vmap_t *vmap)
Destroy a vmap object, and unmap the region.
void mm_lock_ctx_pair(mm_context_t *ctx1, mm_context_t *ctx2)
Lock a pair of mm_context_t objects; mm_unlock_ctx_pair releases them.
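The source fragments above (if (ctx1 == ctx2 || ctx2 == NULL), else if (ctx1 < ctx2)) show the classic deadlock-avoidance idiom: when two contexts must be locked, they are always acquired in address order, so two CPUs locking the same pair in opposite argument order cannot deadlock. A standalone model of that idiom, using a toy spinlock rather than the kernel's:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct { bool locked; } spinlock_t; // toy spinlock, not the kernel's

    static void spinlock_acquire(spinlock_t *l) { while (__atomic_test_and_set(&l->locked, __ATOMIC_ACQUIRE)) /* spin */; }
    static void spinlock_release(spinlock_t *l) { __atomic_clear(&l->locked, __ATOMIC_RELEASE); }

    typedef struct { spinlock_t mm_lock; } mm_context_t; // only the lock matters here

    // Toy model of pair locking: always take the lower-addressed context first.
    static void mm_lock_ctx_pair_model(mm_context_t *ctx1, mm_context_t *ctx2)
    {
        if (ctx1 == ctx2 || ctx2 == NULL)
            spinlock_acquire(&ctx1->mm_lock); // one context (or the same one twice): a single lock
        else if (ctx1 < ctx2)
        {
            spinlock_acquire(&ctx1->mm_lock);
            spinlock_acquire(&ctx2->mm_lock);
        }
        else
        {
            spinlock_acquire(&ctx2->mm_lock);
            spinlock_acquire(&ctx1->mm_lock);
        }
    }

    int main(void)
    {
        mm_context_t a = { 0 }, b = { 0 };
        mm_lock_ctx_pair_model(&a, &b); // safe regardless of argument order
        spinlock_release(&b.mm_lock);
        spinlock_release(&a.mm_lock);
    }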
vmap_t * vmap_split_for_range(vmap_t *vmap, size_t rstart_pgoff, size_t rend_pgoff)
Split a vmap to get a vmap object for a range of pages.
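The fragments above show the case analysis: if the requested page range already spans the whole vmap nothing needs splitting, if it touches one edge a single split suffices, and an interior range needs two splits. A toy model of that logic, tracking only page offsets (the real vmap_split also duplicates flags and mappings):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct vmap { size_t start_pg, npages; } vmap_t; // toy vmap measured in pages

    // Toy split: cut [0, npages) into [0, split) and [split, npages), returning
    // the newly created second half.
    static vmap_t *vmap_split_model(vmap_t *first, size_t split)
    {
        vmap_t *second = malloc(sizeof(vmap_t));
        second->start_pg = first->start_pg + split;
        second->npages = first->npages - split;
        first->npages = split;
        return second;
    }

    // Mirror of the case analysis: split only at boundaries that do not already
    // coincide with the vmap's own edges.
    static vmap_t *vmap_split_for_range_model(vmap_t *vmap, size_t rstart_pgoff, size_t rend_pgoff)
    {
        if (rstart_pgoff == 0 && rend_pgoff == vmap->npages)
            return vmap; // the range covers the whole vmap: nothing to split

        if (rstart_pgoff == 0)
        {
            vmap_split_model(vmap, rend_pgoff); // detach the tail
            return vmap;                        // the head is the requested range
        }

        if (rend_pgoff == vmap->npages)
            return vmap_split_model(vmap, rstart_pgoff); // the new tail is the range

        vmap_t *mid = vmap_split_model(vmap, rstart_pgoff); // cut off the head,
        vmap_split_model(mid, rend_pgoff - rstart_pgoff);   // then trim the tail off the middle
        return mid;
    }

    int main(void)
    {
        vmap_t v = { .start_pg = 0, .npages = 10 };
        vmap_t *r = vmap_split_for_range_model(&v, 3, 7); // pages [3, 7)
        printf("range vmap: start %zu, %zu pages\n", r->start_pg, r->npages);
    }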
@ VMFAULT_COPY_BACKING_PAGE
the caller should copy the backing page into the faulting address
@ VMFAULT_MAP_BACKING_PAGE
the caller should map the backing page into the faulting address
@ VMFAULT_COMPLETE
no further action is needed; the page is correctly mapped now
@ VMFAULT_CANNOT_HANDLE
the handler cannot handle this fault
@ VMFAULT_MAP_BACKING_PAGE_RO
the caller should map the backing page into the faulting address, and mark it non-writable
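These values form the contract between mm_handle_fault and a vmap's on_fault handler. The fragments above show the caller's side of it: VMFAULT_CANNOT_HANDLE routes to the unhandled-fault path, while VMFAULT_MAP_BACKING_PAGE_RO clears VM_WRITE from the mapping flags and then proceeds exactly like VMFAULT_MAP_BACKING_PAGE. A minimal sketch of that dispatch (the enum ordering and VM_WRITE's bit value are assumptions here, not MOS's definitions):

    #include <stdio.h>

    // stand-ins mirroring the names on this page; values are assumptions
    typedef enum { VMFAULT_COMPLETE, VMFAULT_COPY_BACKING_PAGE, VMFAULT_MAP_BACKING_PAGE, VMFAULT_MAP_BACKING_PAGE_RO, VMFAULT_CANNOT_HANDLE } vmfault_result_t;

    typedef unsigned vm_flags;
    #define VM_WRITE 0x2u

    // Toy model of the caller-side dispatch after on_fault returns: the _RO
    // result behaves like MAP_BACKING_PAGE except that write permission is
    // stripped first, which is how copy-on-write pages stay read-only until
    // the next write fault.
    static const char *dispatch_model(vmfault_result_t r, vm_flags *map_flags)
    {
        switch (r)
        {
            case VMFAULT_COMPLETE: return "done, already mapped";
            case VMFAULT_CANNOT_HANDLE: return "unhandled fault";
            case VMFAULT_COPY_BACKING_PAGE: return "copy backing page, then map the copy";
            case VMFAULT_MAP_BACKING_PAGE_RO: *map_flags &= ~VM_WRITE; /* fallthrough */
            case VMFAULT_MAP_BACKING_PAGE: return "map backing page";
        }
        return "?";
    }

    int main(void)
    {
        vm_flags flags = VM_WRITE;
        printf("%s, flags=%#x\n", dispatch_model(VMFAULT_MAP_BACKING_PAGE_RO, &flags), flags);
    }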
void mm_replace_page_locked(mm_context_t *mmctx, ptr_t vaddr, pfn_t pfn, vm_flags flags)
Replace the mappings of a page with a new physical frame.
#define pfn_phyframe(pfn)
#define phyframe_pfn(frame)
phyframe_t * pmm_allocate_frames(size_t n_frames, pmm_allocation_flags_t flags)
Allocate n_frames contiguous frames of physical memory.
@ PMM_ALLOC_NORMAL
allocate normal pages
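pfn_phyframe, phyframe_pfn and phyframe_va convert between a physical frame number, its phyframe_t descriptor, and the kernel virtual address at which the frame is visible. A toy model assuming a flat descriptor table and a linear direct map (DIRECT_MAP_BASE is invented for illustration and is not MOS's actual layout):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pfn_t;
    typedef struct { int refcount; } phyframe_t; // toy frame descriptor

    #define PAGE_SIZE 4096ull
    #define DIRECT_MAP_BASE 0xffff800000000000ull // assumed direct-map base

    static phyframe_t frame_table[1024]; // one descriptor per physical frame

    // toy models of the conversions: descriptor <-> frame number <-> mapped address
    #define pfn_phyframe(pfn) (&frame_table[pfn])
    #define phyframe_pfn(frame) ((pfn_t) ((frame) - frame_table))
    #define phyframe_va(frame) ((void *) (DIRECT_MAP_BASE + phyframe_pfn(frame) * PAGE_SIZE))

    int main(void)
    {
        phyframe_t *frame = pfn_phyframe(42); // pretend frame 42 was just allocated
        printf("pfn=%llu va=%p\n", (unsigned long long) phyframe_pfn(frame), phyframe_va(frame));
    }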
bool io_munmap(io_t *io, vmap_t *vmap, bool *unmapped)
@ IPI_TYPE_INVALIDATE_TLB
void ipi_send_all(ipi_type_t type)
static sysfs_item_t sys_mem_item
static void mm_sysfs_init()
static void invalid_page_fault(ptr_t fault_addr, vmap_t *faulting_vmap, vmap_t *ip_vmap, pagefault_t *info, const char *unhandled_reason)
static slab_t * vmap_cache
static bool sys_mem_munmap(sysfs_file_t *f, vmap_t *vmap, bool *unmapped)
static slab_t * mm_context_cache
static void do_attach_vmap(mm_context_t *mmctx, vmap_t *vmap)
static bool sys_mem_mmap(sysfs_file_t *f, vmap_t *vmap, off_t offset)
void mm_copy_page(const phyframe_t *src, const phyframe_t *dst)
#define MOS_IN_RANGE(addr, start, end)
#define mos_panic(fmt,...)
static void * memcpy(void *s1, const void *s2, size_t n)
__nodiscard bool pml5_destroy_range(pml5_t pml5, ptr_t *vaddr, size_t *n_pages)
#define pml_create_table(x)
#define pr_emerg(fmt,...)
#define pr_demph(feat, fmt,...)
#define pr_dcont(feat, fmt,...)
void process_dump_mmaps(const process_t *process)
#define memzero(ptr, size)
#define MOS_INIT(_comp, _fn)
#define SLAB_AUTOINIT(name, var, type)
should_inline bool spinlock_is_locked(const spinlock_t *lock)
#define spinlock_acquire(lock)
#define spinlock_release(lock)
spinlock_t mm_lock
protects [pgd] and the [mmaps] list (the list itself, not the vmap_t objects)
ptr_t ip
the instruction pointer which caused the fault
platform_regs_t * regs
the register state at the moment of the fault
phyframe_t * faulting_page
the frame that contains the copy-on-write data (if any)
const phyframe_t * backing_page
the frame that contains the data for this page; the on_fault handler should set this
struct sysfs_item_t::@8::@10 mem
vmfault_handler_t on_fault
should_inline void sysfs_register_root_file(sysfs_item_t *item)
Register an entry in the sysfs root directory.
#define SYSFS_MEM_ITEM(_name, _mmap_fn, _munmap_fn)
vm_flags mm_do_get_flags(pgd_t max, ptr_t vaddr)
void mm_do_map(pgd_t top, ptr_t vaddr, pfn_t pfn, size_t n_pages, vm_flags flags, bool do_refcount)
pfn_t mm_do_get_pfn(pgd_t top, ptr_t vaddr)
void mm_do_flag(pgd_t top, ptr_t vaddr, size_t n_pages, vm_flags flags)
void mm_do_unmap(pgd_t top, ptr_t vaddr, size_t n_pages, bool do_unref)