#include "mos/platform/platform_defs.hpp"
#if MOS_CONFIG(MOS_MM_DETAILED_MMAPS_UNHANDLED_FAULT)
mEmerg << "failed to allocate a page";
mEmerg << "failed to allocate " << npages << " pages";
pml4.table[i] = platform_info->kernel_mm->pgd.max.next.table[i];
MOS_ASSERT_X(freed, "failed to free the entire userspace");
std::swap(ctx1, ctx2);
if (ctx1 == NULL || ctx1 == ctx2)
std::swap(ctx1, ctx2);
if (ctx1 == NULL || ctx1 == ctx2)
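The two swap-and-compare fragments above are the heart of mm_lock_context_pair / mm_unlock_context_pair: ordering the pair before locking makes the acquisition order globally consistent, so two CPUs locking the same pair can never deadlock. A minimal sketch of the idea, using the spinlock_acquire macro and the MMContext::mm_lock field listed in the index below; the compare-by-address ordering rule is an assumption, not necessarily what MOS does:

    #include <utility> // std::swap

    // Sketch only: lock two contexts in a globally consistent order.
    static void lock_context_pair_sketch(MMContext *ctx1, MMContext *ctx2)
    {
        if (ctx1 > ctx2)
            std::swap(ctx1, ctx2); // always acquire the lower address first
        if (ctx1 == NULL || ctx1 == ctx2)
        {
            if (ctx2) // only one distinct context to lock
                spinlock_acquire(&ctx2->mm_lock);
            return;
        }
        spinlock_acquire(&ctx1->mm_lock);
        spinlock_acquire(&ctx2->mm_lock);
    }

The unlock side would release in the opposite order after sorting the pair the same way.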
if (old_ctx == new_ctx)
if (m->vaddr > vmap->vaddr)
bool unmapped = false;
if (!vmap->io->unmap(vmap, &unmapped))
    mWarn << "munmap: could not unmap the file: io_munmap() failed";
if (m->vaddr <= vaddr && vaddr < m->vaddr + m->npages * MOS_PAGE_SIZE)
    *out_offset = vaddr - m->vaddr;
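The last two lines are the core of vmap_obtain(): a linear scan for the vmap whose page range covers the address. A rough sketch of that loop, using the list_foreach macro from the index; the mmaps field name comes from the mm_lock description below, and the real function presumably holds mm_lock around the scan:

    // Sketch only: find the vmap covering vaddr and report the byte offset.
    static vmap_t *vmap_lookup_sketch(MMContext *mmctx, ptr_t vaddr, size_t *out_offset)
    {
        list_foreach(vmap_t, m, mmctx->mmaps)
        {
            if (m->vaddr <= vaddr && vaddr < m->vaddr + m->npages * MOS_PAGE_SIZE)
            {
                if (out_offset)
                    *out_offset = vaddr - m->vaddr;
                return m;
            }
        }
        return NULL; // no vmap covers this address
    }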
if (rstart_pgoff == 0 && rend_pgoff == vmap->npages)
if (rstart_pgoff == 0)
if (rend_pgoff == vmap->npages)
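These three conditions enumerate the cases of vmap_split_for_range(): the range may already be the whole vmap, touch its start, touch its end, or sit in the middle. A sketch under the assumption that vmap_split(first, n) cuts 'first' after n pages and returns the new upper vmap:

    // Sketch only: return a vmap covering exactly [rstart_pgoff, rend_pgoff).
    static vmap_t *split_for_range_sketch(vmap_t *vmap, size_t rstart_pgoff, size_t rend_pgoff)
    {
        if (rstart_pgoff == 0 && rend_pgoff == vmap->npages)
            return vmap; // the range is the whole vmap, nothing to split
        if (rstart_pgoff == 0)
        {
            vmap_split(vmap, rend_pgoff); // detach the tail we don't want
            return vmap;
        }
        if (rend_pgoff == vmap->npages)
            return vmap_split(vmap, rstart_pgoff); // detach the head, keep the tail
        // middle: cut twice, keep the part in between
        vmap_t *const mid = vmap_split(vmap, rstart_pgoff);
        vmap_split(mid, rend_pgoff - rstart_pgoff);
        return mid;
    }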
mEmerg << "unhandled page fault: " << unhandled_reason;
#if MOS_CONFIG(MOS_MM_DETAILED_UNHANDLED_FAULT)
    << (info->is_user ? "user" : "kernel") << " mode "
    << (info->is_write ? "write to" : (info->is_exec ? "execute in" : "read from"))
    << " " << (info->is_present ? "present" : "non-present") << " page [" << (void *) fault_addr << "]";
mEmerg << " instruction: " << (void *) info->ip;
mEmerg << " vmap: " << ip_vmap;
mEmerg << " offset: 0x" << (info->ip - ip_vmap->vaddr + (ip_vmap->io ? ip_vmap->io_offset : 0));
if (fault_addr < 1 KB)
    mEmerg << " possible write to NULL pointer";
else if (info->is_exec && fault_addr == 0)
    mEmerg << " attempted to execute NULL pointer";
mEmerg << " possible NULL pointer dereference";
mEmerg << " kernel address dereference";
mEmerg << " in kernel function " << (void *) info->ip;
mEmerg << " in vmap: " << faulting_vmap;
mEmerg << " offset: 0x" << (fault_addr - faulting_vmap->vaddr + (faulting_vmap->io ? faulting_vmap->io_offset : 0));
#if MOS_CONFIG(MOS_MM_DETAILED_UNHANDLED_FAULT)
#if MOS_CONFIG(MOS_MM_DETAILED_MMAPS_UNHANDLED_FAULT)
mInfo << "stack trace before fault (may be unreliable):";
mInfo << "register states before fault:";
const char *unhandled_reason = NULL;
    << ", IP=" << info->ip
    << ", ADDR=" << fault_addr;
mos_panic("Cannot write and execute at the same time");
const auto DoUnhandledPageFault = [&]()
MOS_ASSERT_X(unhandled_reason, "unhandled fault with no reason");
unhandled_reason = "no mm context";
return DoUnhandledPageFault();
unhandled_reason = "page fault in unmapped area";
return DoUnhandledPageFault();
unhandled_reason = "page fault in non-executable vmap";
return DoUnhandledPageFault();
if (ip_vmap != fault_vmap && ip_vmap)
unhandled_reason = "page fault in read-only vmap";
return DoUnhandledPageFault();
default:
    return "UNKNOWN";
VMFlags map_flags = fault_vmap->vmflags;
switch (fault_result)
unhandled_reason = "vmap fault handler returned VMFAULT_CANNOT_HANDLE";
return DoUnhandledPageFault();
goto map_backing_page;
goto map_backing_page;
unhandled_reason = "out of memory";
return DoUnhandledPageFault();
if (fault_vmap != ip_vmap)
DoUnhandledPageFault();
#define MOS_ASSERT_X(cond, msg,...)
long signal_send_to_thread(Thread *target, signal_t signal)
Send a signal to a thread.
MOSAPI void linked_list_init(list_node_t *head_node)
Initialise a circular doubly-linked list.
MOSAPI void list_node_append(list_node_t *head, list_node_t *item)
#define list_foreach(t, v, h)
Iterate over a list.
#define list_node(element)
Get the ‘list_node’ of a list element. This is exactly the reverse of ‘list_entry’ above.
#define list_insert_before(element, item)
MOSAPI bool list_is_empty(const list_node_t *head)
#define list_remove(element)
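A brief usage sketch of the intrusive list API above. The Foo type and its embedded field are invented for illustration; list_node(element) is assumed to resolve to that embedded list_node_t member:

    struct Foo
    {
        list_node_t list_node; // embedded node, assumed field name
        int value;
    };

    list_node_t head;
    linked_list_init(&head); // head points to itself: empty list

    Foo item;
    list_node_append(&head, list_node(&item));

    list_foreach(Foo, it, head) // iterate over every Foo in the list
        it->value++;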
MMContext * mm_switch_context(MMContext *new_ctx)
#define phyframe_va(frame)
void vmap_finalise_init(vmap_t *vmap, vmap_content_t content, vmap_type_t type)
Finalize the initialization of a vmap object.
phyframe_t * mm_get_free_page(void)
vmap_t * vmap_obtain(MMContext *mmctx, ptr_t vaddr, size_t *out_offset)
Get the vmap object for a virtual address.
phyframe_t * mm_get_free_page_raw(void)
vmap_t * vmap_split(vmap_t *first, size_t split)
Split a vmap object into two, at the specified offset.
void mm_lock_context_pair(MMContext *ctx1_, MMContext *ctx2_)
Lock and unlock a pair of MMContext objects.
void mm_destroy_context(MMContext *mmctx)
Destroy a user-mode platform-dependent page table.
vmfault_result_t mm_resolve_cow_fault(vmap_t *vmap, ptr_t fault_addr, pagefault_t *info)
Helper function to resolve a copy-on-write fault.
MMContext * mm_create_context(void)
Create a user-mode platform-dependent page table.
void mm_handle_fault(ptr_t fault_addr, pagefault_t *info)
Handle a page fault.
phyframe_t * mm_get_free_pages(size_t npages)
void mm_unlock_context_pair(MMContext *ctx1_, MMContext *ctx2_)
vmap_t * vmap_create(MMContext *mmctx, ptr_t vaddr, size_t npages)
Create a vmap object and insert it into the address space.
void vmap_destroy(vmap_t *vmap)
Destroy a vmap object, and unmap the region.
vmap_t * vmap_split_for_range(vmap_t *vmap, size_t rstart_pgoff, size_t rend_pgoff)
Split a vmap to get a vmap object for a range of pages.
VMFAULT_COPY_BACKING_PAGE
the caller should copy the backing page into the faulting address
VMFAULT_MAP_BACKING_PAGE
the caller should map the backing page into the faulting address
VMFAULT_COMPLETE
no further action is needed; the page is correctly mapped now
VMFAULT_CANNOT_HANDLE
the handler cannot handle this fault
VMFAULT_MAP_BACKING_PAGE_RO
the caller should map the backing page into the faulting address, and mark it non-writable
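These result codes define the contract between a vmap's on_fault handler and the dispatcher in mm_handle_fault. A hedged sketch of the dispatch they imply, reusing mm_get_free_page, mm_copy_page, phyframe_pfn and mm_replace_page_locked from this index; fault_vmap->mmctx, the VM_WRITE flag bit, and the bitmask operators on VMFlags are assumptions:

    // Sketch only: act on the handler's verdict.
    switch (fault_result)
    {
        case VMFAULT_COMPLETE:
            return; // the handler already fixed the mapping
        case VMFAULT_CANNOT_HANDLE:
            unhandled_reason = "vmap fault handler returned VMFAULT_CANNOT_HANDLE";
            return DoUnhandledPageFault();
        case VMFAULT_COPY_BACKING_PAGE:
        {
            // copy the backing page into a fresh frame, then map the copy
            phyframe_t *const page = mm_get_free_page();
            mm_copy_page(info->backing_page, page);
            mm_replace_page_locked(fault_vmap->mmctx, fault_addr, phyframe_pfn(page), map_flags);
            return;
        }
        case VMFAULT_MAP_BACKING_PAGE_RO:
            map_flags = map_flags & ~VM_WRITE; // assumed writable bit
            [[fallthrough]];
        case VMFAULT_MAP_BACKING_PAGE:
            mm_replace_page_locked(fault_vmap->mmctx, fault_addr, phyframe_pfn(info->backing_page), map_flags);
            return;
    }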
void mm_replace_page_locked(MMContext *mmctx, ptr_t vaddr, pfn_t pfn, VMFlags flags)
Replace the mappings of a page with a new physical frame.
#define pfn_phyframe(pfn)
#define phyframe_pfn(frame)
phyframe_t * pmm_allocate_frames(size_t n_frames, pmm_allocation_flags_t flags)
Allocate n_frames of contiguous physical memory.
PMM_ALLOC_NORMAL
allocate normal pages
IPI_TYPE_INVALIDATE_TLB
void ipi_send_all(ipi_type_t type)
static sysfs_item_t sys_mem_item
static void mm_sysfs_init()
static void invalid_page_fault(ptr_t fault_addr, vmap_t *faulting_vmap, vmap_t *ip_vmap, pagefault_t *info, const char *unhandled_reason)
static void do_attach_vmap(MMContext *mmctx, vmap_t *vmap)
static bool sys_mem_munmap(sysfs_file_t *f, vmap_t *vmap, bool *unmapped)
static bool sys_mem_mmap(sysfs_file_t *f, vmap_t *vmap, off_t offset)
void mm_copy_page(const phyframe_t *src, const phyframe_t *dst)
#define MOS_IN_RANGE(addr, start, end)
T * create(Args &&...args)
#define mos_panic(fmt,...)
static void * memcpy(void *s1, const void *s2, size_t n)
__nodiscard bool pml5_destroy_range(pml5_t pml5, ptr_t *vaddr, size_t *n_pages)
#define pml_create_table(x)
void process_dump_mmaps(const Process *process)
#define memzero(ptr, size)
#define MOS_INIT(_comp, _fn)
should_inline bool spinlock_is_locked(const spinlock_t *lock)
#define spinlock_acquire(lock)
#define spinlock_release(lock)
bool unmap(vmap_t *vmap, bool *unmapped)
spinlock_t mm_lock
protects [pgd] and the [mmaps] list (the list itself, not the vmap_t objects)
ptr_t ip
the instruction pointer which caused the fault
phyframe_t * faulting_page
the frame that contains the copy-on-write data (if any)
const phyframe_t * backing_page
the frame that contains the data for this page; the on_fault handler should set this
const platform_regs_t * regs
the register state at the moment the fault occurred
vmfault_handler_t on_fault
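Putting the pagefault_t fields together: a hypothetical arch-side caller that packs the CPU's fault information and hands it to mm_handle_fault. The error-code bit layout shown is x86-style, and the regs_ip accessor is invented for the sketch:

    // Sketch only: translate a CPU page-fault exception into a pagefault_t.
    void arch_handle_page_fault_sketch(ptr_t fault_addr, u64 error_code, const platform_regs_t *regs)
    {
        pagefault_t info = {};
        info.is_present = error_code & 0x1;  // protection violation vs. not-present
        info.is_write   = error_code & 0x2;  // write access
        info.is_user    = error_code & 0x4;  // fault taken in user mode
        info.is_exec    = error_code & 0x10; // instruction fetch
        info.ip         = regs_ip(regs);     // invented accessor for the faulting IP
        info.regs       = regs;
        mm_handle_fault(fault_addr, &info);
    }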
should_inline void sysfs_register_root_file(sysfs_item_t *item)
Register an entry in the sysfs root directory.
#define SYSFS_MEM_ITEM(_name, _mmap_fn, _munmap_fn)
void mm_do_map(pgd_t top, ptr_t vaddr, pfn_t pfn, size_t n_pages, VMFlags flags, bool do_refcount)
void mm_do_flag(pgd_t top, ptr_t vaddr, size_t n_pages, VMFlags flags)
VMFlags mm_do_get_flags(pgd_t max, ptr_t vaddr)
pfn_t mm_do_get_pfn(pgd_t top, ptr_t vaddr)
void mm_do_unmap(pgd_t top, ptr_t vaddr, size_t n_pages, bool do_unref)
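Finally, the mm_do_* page-table walkers above pair naturally with the frame allocator: a hedged sketch that allocates contiguous frames, maps them, and unmaps them again. VM_READ and VM_WRITE flag names are assumptions:

    // Sketch only: back npages of virtual address space with fresh frames.
    void map_scratch_sketch(pgd_t pgd, ptr_t vaddr, size_t npages)
    {
        phyframe_t *const frames = pmm_allocate_frames(npages, PMM_ALLOC_NORMAL);
        if (!frames)
            return; // out of physical memory

        mm_do_map(pgd, vaddr, phyframe_pfn(frames), npages, VM_READ | VM_WRITE, true);
        // ... use [vaddr, vaddr + npages * MOS_PAGE_SIZE) ...
        mm_do_unmap(pgd, vaddr, npages, true); // drop the refcounts taken above
    }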