29 const ptr_t this_vaddr = vmap->vaddr;
33 if (this_vaddr < end_vaddr && this_end_vaddr > base_vaddr)
37 return ERR_PTR(-ENOMEM);
42 return ERR_PTR(-ENOMEM);
49 ptr_t retry_addr = base_vaddr;
54 return ERR_PTR(-ENOMEM);
56 const ptr_t this_vaddr = mmap->vaddr;
60 if (this_vaddr < target_vaddr_end && this_end_vaddr > retry_addr)
64 retry_addr = this_end_vaddr;
78 return ERR_PTR(-ENOMEM);
98 mos_warn(
"could not find %zd pages in the address space", npages);
138 mos_warn(
"could not find %zd pages in the address space", src_vmap->
npages);
146 dst_vmap->
io = src_vmap->
io;
164 if (vmap->vaddr <= vaddr && vaddr < vmap->vaddr + vmap->npages *
MOS_PAGE_SIZE)
175 pr_dinfo2(vmm,
"flagging %zd pages at " PTR_FMT " with flags %x", npages, vaddr, flags);
#define MOS_ASSERT_X(cond, msg,...)
#define mos_warn(fmt,...)
#define list_foreach(t, v, h)
Iterate over a list.
void vmap_finalise_init(vmap_t *vmap, vmap_content_t content, vmap_type_t type)
Finalize the initialization of a vmap object.
vmap_t * vmap_create(mm_context_t *mmctx, ptr_t vaddr, size_t npages)
Create a vmap object and insert it into the address space.
ptr_t mm_get_phys_addr(mm_context_t *ctx, ptr_t vaddr)
void mm_replace_page_locked(mm_context_t *ctx, ptr_t vaddr, pfn_t pfn, vm_flags flags)
Replace the mappings of a page with a new physical frame.
vmap_t * mm_clone_vmap_locked(vmap_t *src_vmap, mm_context_t *dst_ctx)
Remap a block of virtual memory from one page table to another, i.e. copy the mappings.
bool mm_get_is_mapped_locked(mm_context_t *mmctx, ptr_t vaddr)
Check whether a virtual address is mapped in a page table.
vmap_t * mm_map_user_pages(mm_context_t *mmctx, ptr_t vaddr, pfn_t pfn, size_t npages, vm_flags flags, valloc_flags vaflags, vmap_type_t type, vmap_content_t content)
void mm_flag_pages_locked(mm_context_t *ctx, ptr_t vaddr, size_t npages, vm_flags flags)
Update the flags of a block of virtual memory.
void mm_map_kernel_pages(mm_context_t *mmctx, ptr_t vaddr, pfn_t pfn, size_t npages, vm_flags flags)
Map a block of virtual memory to a block of physical memory.
vmap_t * mm_get_free_vaddr_locked(mm_context_t *mmctx, size_t n_pages, ptr_t base_vaddr, valloc_flags flags)
Get npages of unmapped, free pages from a page table.
@ VALLOC_EXACT
Allocate pages at the exact address.
#define pmm_ref_one(thing)
#define pmm_unref_one(thing)
#define ALIGN_DOWN_TO_PAGE(addr)
#define pr_dinfo2(feat, fmt,...)
should_inline bool spinlock_is_locked(const spinlock_t *lock)
#define spinlock_acquire(lock)
#define spinlock_release(lock)
spinlock_t mm_lock
protects [pgd] and the [mmaps] list (the list itself, not the vmap_t objects)
size_t regular
regular pages with no special flags being set or unset
vmfault_handler_t on_fault
void mm_do_map(pgd_t top, ptr_t vaddr, pfn_t pfn, size_t n_pages, vm_flags flags, bool do_refcount)
void mm_do_copy(pgd_t src, pgd_t dst, ptr_t vaddr, size_t n_pages)
pfn_t mm_do_get_pfn(pgd_t top, ptr_t vaddr)
void mm_do_flag(pgd_t top, ptr_t vaddr, size_t n_pages, vm_flags flags)