// SPDX-License-Identifier: GPL-3.0-or-later

#include "mos/io/io.h"
#include "mos/mm/mm.h"
#include "mos/mm/mmstat.h"
#include "mos/mm/paging/table_ops.h"

#include <mos/lib/structures/bitmap.h>
#include <mos/lib/structures/list.h>
#include <mos/lib/sync/spinlock.h>
#include <mos/mm/paging/paging.h>
#include <mos/mm/physical/pmm.h>
#include <mos/mos_global.h>
#include <mos/platform/platform.h>
#include <mos/syslog/printk.h>
#include <mos_stdlib.h>

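/**
 * Find a free region of virtual address space in mmctx.
 *
 * The caller must hold mmctx->mm_lock. With VALLOC_EXACT the region must
 * start exactly at base_vaddr; otherwise base_vaddr is the lowest address
 * to consider and the mapping list is scanned for a gap large enough to
 * hold n_pages.
 *
 * Returns a newly created vmap_t on success, or ERR_PTR(-ENOMEM) if no
 * suitable region exists below MOS_USER_END_VADDR.
 */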
vmap_t *mm_get_free_vaddr_locked(mm_context_t *mmctx, size_t n_pages, ptr_t base_vaddr, valloc_flags flags)
{
    MOS_ASSERT_X(spinlock_is_locked(&mmctx->mm_lock), "insane mmctx->mm_lock state");
    MOS_ASSERT_X(base_vaddr < MOS_KERNEL_START_VADDR, "Use mm_get_free_pages instead");

    if (flags & VALLOC_EXACT)
    {
        const ptr_t end_vaddr = base_vaddr + n_pages * MOS_PAGE_SIZE;
        // the region must start exactly at base_vaddr, so any overlap is fatal
        list_foreach(vmap_t, vmap, mmctx->mmaps)
        {
            const ptr_t this_vaddr = vmap->vaddr;
            const ptr_t this_end_vaddr = this_vaddr + vmap->npages * MOS_PAGE_SIZE;

            // does this vmap overlap with the requested region?
            if (this_vaddr < end_vaddr && this_end_vaddr > base_vaddr)
            {
                // it does, so we cannot allocate here
                return ERR_PTR(-ENOMEM);
            }
        }

        if (end_vaddr > MOS_USER_END_VADDR)
            return ERR_PTR(-ENOMEM);

        // nothing overlaps
        return vmap_create(mmctx, base_vaddr, n_pages);
    }
    else
    {
        ptr_t retry_addr = base_vaddr;
        list_foreach(vmap_t, mmap, mmctx->mmaps)
        {
            // would this allocation run past the end of the user address space?
            if (retry_addr + n_pages * MOS_PAGE_SIZE > MOS_USER_END_VADDR)
                return ERR_PTR(-ENOMEM);

            const ptr_t this_vaddr = mmap->vaddr;
            const ptr_t this_end_vaddr = this_vaddr + mmap->npages * MOS_PAGE_SIZE;

            const ptr_t target_vaddr_end = retry_addr + n_pages * MOS_PAGE_SIZE;
            if (this_vaddr < target_vaddr_end && this_end_vaddr > retry_addr)
            {
                // this mmap overlaps with the candidate region,
                // so retry from the first address after it
                retry_addr = this_end_vaddr;
            }

            if (retry_addr + n_pages * MOS_PAGE_SIZE <= this_vaddr)
            {
                // the gap before this mmap is large enough
                return vmap_create(mmctx, retry_addr, n_pages);
            }
        }

        // we've reached the end of the list (which may have been empty)
        if (retry_addr + n_pages * MOS_PAGE_SIZE <= MOS_USER_END_VADDR)
            return vmap_create(mmctx, retry_addr, n_pages);

        return ERR_PTR(-ENOMEM);
    }
}

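/**
 * Map npages contiguous frames starting at pfn into kernel space at vaddr.
 *
 * Takes mmctx->mm_lock internally; the mapping is not tracked by a vmap_t
 * and the physical frames are not refcounted.
 */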
void mm_map_kernel_pages(mm_context_t *mmctx, ptr_t vaddr, pfn_t pfn, size_t npages, vm_flags flags)
{
    MOS_ASSERT(vaddr >= MOS_KERNEL_START_VADDR);
    MOS_ASSERT(npages > 0);
    spinlock_acquire(&mmctx->mm_lock);
    pr_dinfo2(vmm, "mapping %zd pages at " PTR_FMT " to pfn " PFN_FMT, npages, vaddr, pfn);
    mm_do_map(mmctx->pgd, vaddr, pfn, npages, flags, false);
    spinlock_release(&mmctx->mm_lock);
}

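/**
 * Allocate a user-space vmap and map npages frames starting at pfn into it.
 *
 * Placement is controlled by vaflags (see mm_get_free_vaddr_locked): with
 * VALLOC_EXACT the mapping must start at vaddr, otherwise vaddr is only a
 * lower bound. Returns the finalised vmap on success, or NULL if no
 * suitable address range could be found.
 */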
vmap_t *mm_map_user_pages(mm_context_t *mmctx, ptr_t vaddr, pfn_t pfn, size_t npages, vm_flags flags, valloc_flags vaflags, vmap_type_t type, vmap_content_t content)
{
    spinlock_acquire(&mmctx->mm_lock);
    vmap_t *vmap = mm_get_free_vaddr_locked(mmctx, npages, vaddr, vaflags);
    if (unlikely(IS_ERR(vmap))) // mm_get_free_vaddr_locked returns ERR_PTR, not NULL, on failure
    {
        mos_warn("could not find %zd pages in the address space", npages);
        spinlock_release(&mmctx->mm_lock);
        return NULL;
    }

    pr_dinfo2(vmm, "mapping %zd pages at " PTR_FMT " to pfn " PFN_FMT, npages, vmap->vaddr, pfn);
    vmap->vmflags = flags;
    vmap->stat.regular = npages;
    mm_do_map(mmctx->pgd, vmap->vaddr, pfn, npages, flags, false);
    spinlock_release(&mmctx->mm_lock);
    vmap_finalise_init(vmap, content, type);
    return vmap;
}

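/**
 * Replace the frame mapped at vaddr (aligned down to a page boundary)
 * with pfn.
 *
 * The reference held by the old mapping, if any, is dropped and a new
 * reference is taken on pfn; replacing a page with itself is rejected
 * with a warning.
 */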
void mm_replace_page_locked(mm_context_t *ctx, ptr_t vaddr, pfn_t pfn, vm_flags flags)
{
    vaddr = ALIGN_DOWN_TO_PAGE(vaddr);
    pr_dinfo2(vmm, "filling page at " PTR_FMT " with " PFN_FMT, vaddr, pfn);

    const pfn_t old_pfn = mm_do_get_pfn(ctx->pgd, vaddr);

    if (unlikely(old_pfn == pfn))
    {
        mos_warn("trying to replace page at " PTR_FMT " with the same page " PFN_FMT, vaddr, pfn);
        return;
    }

    if (likely(old_pfn))
        pmm_unref_one(old_pfn); // drop the reference held by the old mapping

    pmm_ref_one(pfn);
    mm_do_map(ctx->pgd, vaddr, pfn, 1, flags, false);
}

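/**
 * Clone src_vmap into dst_ctx at the same virtual address.
 *
 * The page-table entries are copied with mm_do_copy, and the vmap metadata
 * (flags, backing io, fault handler, statistics) is duplicated; the backing
 * io, if any, gains an extra reference. Returns NULL if the address range
 * is not free in dst_ctx.
 */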
vmap_t *mm_clone_vmap_locked(vmap_t *src_vmap, mm_context_t *dst_ctx)
{
    vmap_t *dst_vmap = mm_get_free_vaddr_locked(dst_ctx, src_vmap->npages, src_vmap->vaddr, VALLOC_EXACT);

    if (unlikely(IS_ERR(dst_vmap))) // mm_get_free_vaddr_locked returns ERR_PTR, not NULL, on failure
    {
        mos_warn("could not find %zd pages in the address space", src_vmap->npages);
        return NULL;
    }

    pr_dinfo2(vmm, "copying mapping from " PTR_FMT ", %zu pages", src_vmap->vaddr, src_vmap->npages);
    mm_do_copy(src_vmap->mmctx->pgd, dst_vmap->mmctx->pgd, src_vmap->vaddr, src_vmap->npages);

    dst_vmap->vmflags = src_vmap->vmflags;
    dst_vmap->io = src_vmap->io;
    dst_vmap->io_offset = src_vmap->io_offset;
    dst_vmap->content = src_vmap->content;
    dst_vmap->type = src_vmap->type;
    dst_vmap->stat = src_vmap->stat;
    dst_vmap->on_fault = src_vmap->on_fault;

    if (src_vmap->io)
        io_ref(src_vmap->io); // the cloned vmap holds its own reference to the backing io

    return dst_vmap;
}

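/**
 * Check whether vaddr falls inside any vmap of mmctx.
 */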
bool mm_get_is_mapped_locked(mm_context_t *mmctx, ptr_t vaddr)
{
    MOS_ASSERT(spinlock_is_locked(&mmctx->mm_lock));
    list_foreach(vmap_t, vmap, mmctx->mmaps)
    {
        if (vmap->vaddr <= vaddr && vaddr < vmap->vaddr + vmap->npages * MOS_PAGE_SIZE)
            return true;
    }

    return false;
}

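/**
 * Update the vm_flags of npages pages starting at vaddr.
 */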
void mm_flag_pages_locked(mm_context_t *ctx, ptr_t vaddr, size_t npages, vm_flags flags)
{
    MOS_ASSERT(npages > 0);
    MOS_ASSERT(spinlock_is_locked(&ctx->mm_lock));
    pr_dinfo2(vmm, "flagging %zd pages at " PTR_FMT " with flags %x", npages, vaddr, flags);
    mm_do_flag(ctx->pgd, vaddr, npages, flags);
}

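/**
 * Translate vaddr to a physical address by looking up its page frame and
 * preserving the in-page offset bits.
 *
 * For example (a sketch, assuming 4 KiB pages so PML1_SHIFT is 12):
 * if vaddr 0x1234 is mapped to pfn 0x42, the result is
 * (0x42 << 12) | 0x234 == 0x42234.
 */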
ptr_t mm_get_phys_addr(mm_context_t *ctx, ptr_t vaddr)
{
    const pfn_t pfn = mm_do_get_pfn(ctx->pgd, vaddr);
    return (pfn << PML1_SHIFT) | (vaddr % MOS_PAGE_SIZE);
}