bool is_present, is_write, is_user, is_exec;
typedef struct _vmap vmap_t;
#define pfn_va(pfn) ((ptr_t) (platform_info->direct_map_base + (pfn) * (MOS_PAGE_SIZE)))
#define va_pfn(va) ((((ptr_t) (va)) - platform_info->direct_map_base) / MOS_PAGE_SIZE)
#define va_phyframe(va) (&phyframes[va_pfn(va)])
#define phyframe_va(frame) ((ptr_t) pfn_va(phyframe_pfn(frame)))
#define pa_va(pa) ((ptr_t) (pa) + platform_info->direct_map_base)
#define mm_free_page(frame) pmm_free_frames(frame, 1)
#define mm_free_pages(frame, npages) pmm_free_frames(frame, npages)
__BEGIN_DECLS
phyframe_t * mm_get_free_page(void)
__nodiscard mm_context_t * mm_switch_context(mm_context_t *new_ctx)
void vmap_finalise_init(vmap_t *vmap, vmap_content_t content, vmap_type_t type)
Finalize the initialization of a vmap object.
vmap_t * vmap_obtain(mm_context_t *mmctx, ptr_t vaddr, size_t *out_offset)
Get the vmap object for a virtual address.
phyframe_t * mm_get_free_page_raw(void)
vmap_t * vmap_split(vmap_t *vmap, size_t split)
Split a vmap object into two, at the specified offset.
void mm_unlock_ctx_pair(mm_context_t *ctx1, mm_context_t *ctx2)
vmap_t * vmap_create(mm_context_t *mmctx, ptr_t vaddr, size_t npages)
Create a vmap object and insert it into the address space.
vmfault_result_t mm_resolve_cow_fault(vmap_t *vmap, ptr_t fault_addr, pagefault_t *info)
Helper function to resolve a copy-on-write fault.
vmfault_result_t(* vmfault_handler_t)(vmap_t *vmap, ptr_t fault_addr, pagefault_t *info)
void mm_destroy_context(mm_context_t *table)
Destroy a user-mode platform-dependent page table.
void mm_handle_fault(ptr_t fault_addr, pagefault_t *info)
Handle a page fault.
phyframe_t * mm_get_free_pages(size_t npages)
mm_context_t * mm_create_context(void)
Create a user-mode platform-dependent page table.
void vmap_destroy(vmap_t *vmap)
Destroy a vmap object, and unmap the region.
void mm_lock_ctx_pair(mm_context_t *ctx1, mm_context_t *ctx2)
Lock and unlock a pair of mm_context_t objects.
vmap_t * vmap_split_for_range(vmap_t *vmap, size_t rstart_pgoff, size_t rend_pgoff)
Split a vmap to get a vmap object for a range of pages.
@ VMFAULT_COPY_BACKING_PAGE
the caller should copy the backing page into the faulting address
@ VMFAULT_MAP_BACKING_PAGE
the caller should map the backing page into the faulting address
@ VMFAULT_COMPLETE
no further action is needed, the page is correctly mapped now
@ VMFAULT_CANNOT_HANDLE
the handler cannot handle this fault
@ VMFAULT_MAP_BACKING_PAGE_RO
the caller should map the backing page into the faulting address, and mark it non-writable
ptr_t ip
the instruction pointer which caused the fault
platform_regs_t * regs
the registers of the moment that caused the fault
phyframe_t * faulting_page
the frame that contains the copy-on-write data (if any)
const phyframe_t * backing_page
the frame that contains the data for this page, the on_fault handler should set this
Memory usage statistics for a specific vmap area.
vmfault_handler_t on_fault