// SPDX-License-Identifier: GPL-3.0-or-later

#include "mos/io/io.hpp"
#include "mos/mm/mm.hpp"
#include "mos/mm/mmstat.hpp"
#include "mos/mm/paging/table_ops.hpp"

#include <mos/lib/structures/bitmap.hpp>
#include <mos/lib/structures/list.hpp>
#include <mos/lib/sync/spinlock.hpp>
#include <mos/mm/paging/paging.hpp>
#include <mos/mm/physical/pmm.hpp>
#include <mos/mos_global.h>
#include <mos/platform/platform.hpp>
#include <mos/syslog/printk.hpp>
#include <mos_stdlib.hpp>

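// Find a free region of n_pages pages in mmctx's address space and create a vmap for it.
// With VALLOC_EXACT the region must start exactly at base_vaddr; otherwise base_vaddr is the
// lowest address considered and the first large-enough gap is used (the first-fit scan relies
// on mmctx->mmaps being sorted by ascending vaddr). The caller must hold mmctx->mm_lock.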
PtrResult<vmap_t> mm_get_free_vaddr_locked(MMContext *mmctx, size_t n_pages, ptr_t base_vaddr, valloc_flags flags)
{
    MOS_ASSERT_X(spinlock_is_locked(&mmctx->mm_lock), "insane mmctx->mm_lock state");
    MOS_ASSERT_X(base_vaddr < MOS_KERNEL_START_VADDR, "Use mm_get_free_pages instead");

    if (flags & VALLOC_EXACT)
    {
        const ptr_t end_vaddr = base_vaddr + n_pages * MOS_PAGE_SIZE;
        // we need a free area that starts exactly at base_vaddr
        list_foreach(vmap_t, vmap, mmctx->mmaps)
        {
            const ptr_t this_vaddr = vmap->vaddr;
            const ptr_t this_end_vaddr = this_vaddr + vmap->npages * MOS_PAGE_SIZE;

            // check whether this vmap overlaps [base_vaddr, end_vaddr)
            if (this_vaddr < end_vaddr && this_end_vaddr > base_vaddr)
            {
                // it does, so we can't allocate here
                return -ENOMEM;
            }
        }

        if (end_vaddr > MOS_USER_END_VADDR)
            return -ENOMEM;

        // nothing overlaps
        return vmap_create(mmctx, base_vaddr, n_pages);
    }
    else
    {
        ptr_t retry_addr = base_vaddr;
        list_foreach(vmap_t, mmap, mmctx->mmaps)
        {
            // bail out if the candidate area would run past the end of the user address space
            if (retry_addr + n_pages * MOS_PAGE_SIZE > MOS_USER_END_VADDR)
                return -ENOMEM;

            const ptr_t this_vaddr = mmap->vaddr;
            const ptr_t this_end_vaddr = this_vaddr + mmap->npages * MOS_PAGE_SIZE;

            const ptr_t target_vaddr_end = retry_addr + n_pages * MOS_PAGE_SIZE;
            if (this_vaddr < target_vaddr_end && this_end_vaddr > retry_addr)
            {
                // this mmap overlaps with the area we want to allocate,
                // so skip past it and try the next gap
                retry_addr = this_end_vaddr;
            }

            if (retry_addr + n_pages * MOS_PAGE_SIZE <= this_vaddr)
            {
                // the gap before this vmap is large enough
                return vmap_create(mmctx, retry_addr, n_pages);
            }
        }

        // we've reached the end of the list (which may have been empty);
        // allocate after the last vmap if there is still room
        if (retry_addr + n_pages * MOS_PAGE_SIZE <= MOS_USER_END_VADDR)
            return vmap_create(mmctx, retry_addr, n_pages);

        return -ENOMEM;
    }
}

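// Map npages pages starting at vaddr (which must be in kernel space) to the physical frames
// starting at pfn, taking mm_lock internally. No vmap is created for kernel mappings.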
void mm_map_kernel_pages(MMContext *mmctx, ptr_t vaddr, pfn_t pfn, size_t npages, vm_flags flags)
{
    MOS_ASSERT(vaddr >= MOS_KERNEL_START_VADDR);
    MOS_ASSERT(npages > 0);
    spinlock_acquire(&mmctx->mm_lock);
    pr_dinfo2(vmm, "mapping %zd pages at " PTR_FMT " to pfn " PFN_FMT, npages, vaddr, pfn);
    mm_do_map(mmctx->pgd, vaddr, pfn, npages, flags, false);
    spinlock_release(&mmctx->mm_lock);
}

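// Allocate a free user-space region (at or near vaddr, depending on vaflags), map it to the
// physical frames starting at pfn, and return the finalised vmap; on failure the address
// space is left unchanged and -ENOMEM is returned.
//
// A hypothetical caller might look like this (argument names are illustrative only):
//
//   auto vmap = mm_map_user_pages(ctx, wanted_vaddr, pfn, 4, perms, VALLOC_EXACT, type, content);
//   if (vmap.isErr())
//       ...; // propagate -ENOMEM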
PtrResult<vmap_t> mm_map_user_pages(MMContext *mmctx, ptr_t vaddr, pfn_t pfn, size_t npages, vm_flags flags, valloc_flags vaflags, vmap_type_t type,
                                    vmap_content_t content)
{
    spinlock_acquire(&mmctx->mm_lock);
    auto vmap = mm_get_free_vaddr_locked(mmctx, npages, vaddr, vaflags);
    if (unlikely(vmap.isErr()))
    {
        mos_warn("could not find %zd pages in the address space", npages);
        spinlock_release(&mmctx->mm_lock);
        return -ENOMEM;
    }

    pr_dinfo2(vmm, "mapping %zd pages at " PTR_FMT " to pfn " PFN_FMT, npages, vmap->vaddr, pfn);
    vmap->vmflags = flags;
    vmap->stat.regular = npages;
    mm_do_map(mmctx->pgd, vmap->vaddr, pfn, npages, flags, false);
    spinlock_release(&mmctx->mm_lock);
    vmap_finalise_init(&*vmap, content, type);
    return vmap;
}

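// Replace the page mapped at vaddr (aligned down to a page boundary) with the frame at pfn,
// keeping refcounts balanced: the old frame, if any, is unreferenced and the new one is
// referenced. Panics if pfn is already mapped there. Per the _locked naming convention, the
// caller is expected to hold ctx->mm_lock.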
void mm_replace_page_locked(MMContext *ctx, ptr_t vaddr, pfn_t pfn, vm_flags flags)
{
    vaddr = ALIGN_DOWN_TO_PAGE(vaddr);
    pr_dinfo2(vmm, "filling page at " PTR_FMT " with " PFN_FMT, vaddr, pfn);

    const pfn_t old_pfn = mm_do_get_pfn(ctx->pgd, vaddr);

    if (unlikely(old_pfn == pfn))
    {
        mos_panic("trying to replace page at " PTR_FMT " with the same page " PFN_FMT, vaddr, pfn);
        return;
    }

    if (likely(old_pfn))
        pmm_unref_one(old_pfn); // drop the reference held by the old mapping

    pmm_ref_one(pfn);
    mm_do_map(ctx->pgd, vaddr, pfn, 1, flags, false);
}

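// Clone src_vmap into dst_ctx at the same virtual address: reserve the region with
// VALLOC_EXACT, copy the page-table entries, and duplicate the vmap metadata, taking an
// extra reference on the backing io object if there is one. Per the _locked naming
// convention, the caller is expected to hold the relevant mm locks.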
PtrResult<vmap_t> mm_clone_vmap_locked(vmap_t *src_vmap, MMContext *dst_ctx)
{
    auto dst_vmap = mm_get_free_vaddr_locked(dst_ctx, src_vmap->npages, src_vmap->vaddr, VALLOC_EXACT);

    if (unlikely(dst_vmap.isErr()))
    {
        mos_warn("could not find %zd pages in the address space", src_vmap->npages);
        return nullptr;
    }

    pr_dinfo2(vmm, "copying mapping from " PTR_FMT ", %zu pages", src_vmap->vaddr, src_vmap->npages);
    mm_do_copy(src_vmap->mmctx->pgd, dst_vmap->mmctx->pgd, src_vmap->vaddr, src_vmap->npages);

    dst_vmap->vmflags = src_vmap->vmflags;
    dst_vmap->io = src_vmap->io;
    dst_vmap->io_offset = src_vmap->io_offset;
    dst_vmap->content = src_vmap->content;
    dst_vmap->type = src_vmap->type;
    dst_vmap->stat = src_vmap->stat;
    dst_vmap->on_fault = src_vmap->on_fault;

    if (src_vmap->io)
        io_ref(src_vmap->io);

    return dst_vmap;
}

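// Check whether vaddr falls within any vmap of mmctx. Only the vmap list is consulted, not
// the page tables, so an address inside a reserved-but-not-yet-faulted-in vmap also counts.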
bool mm_get_is_mapped_locked(MMContext *mmctx, ptr_t vaddr)
{
    MOS_ASSERT(spinlock_is_locked(&mmctx->mm_lock));
    list_foreach(vmap_t, vmap, mmctx->mmaps)
    {
        if (vmap->vaddr <= vaddr && vaddr < vmap->vaddr + vmap->npages * MOS_PAGE_SIZE)
            return true;
    }

    return false;
}

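// Rewrite the page-table flags of npages pages starting at vaddr; the vmap's recorded
// vmflags are not updated here. The caller must hold ctx->mm_lock.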
void mm_flag_pages_locked(MMContext *ctx, ptr_t vaddr, size_t npages, vm_flags flags)
{
    MOS_ASSERT(npages > 0);
    MOS_ASSERT(spinlock_is_locked(&ctx->mm_lock));
    pr_dinfo2(vmm, "flagging %zd pages at " PTR_FMT " with flags %x", npages, vaddr, flags);
    mm_do_flag(ctx->pgd, vaddr, npages, flags);
}

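// Translate a virtual address into a physical address by looking up its pfn in the page
// tables, then combining the frame base with the in-page offset.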
ptr_t mm_get_phys_addr(MMContext *ctx, ptr_t vaddr)
{
    pfn_t pfn = mm_do_get_pfn(ctx->pgd, vaddr);
    return (pfn << PML1_SHIFT) | (vaddr % MOS_PAGE_SIZE);
}