MOS Source Code
Loading...
Searching...
No Matches
paging.cpp
Go to the documentation of this file.
1// SPDX-License-Identifier: GPL-3.0-or-later
2
3#include "mos/io/io.hpp"
4#include "mos/mm/mm.hpp"
5#include "mos/mm/mmstat.hpp"
7
13#include <mos/mos_global.h>
15#include <mos/syslog/printk.hpp>
16#include <mos_stdlib.hpp>
17
18PtrResult<vmap_t> mm_get_free_vaddr_locked(MMContext *mmctx, size_t n_pages, ptr_t base_vaddr, valloc_flags flags)
19{
20 MOS_ASSERT_X(spinlock_is_locked(&mmctx->mm_lock), "insane mmctx->mm_lock state");
21 MOS_ASSERT_X(base_vaddr < MOS_KERNEL_START_VADDR, "Use mm_get_free_pages instead");
22
23 if (flags & VALLOC_EXACT)
24 {
25 const ptr_t end_vaddr = base_vaddr + n_pages * MOS_PAGE_SIZE;
26 // we need to find a free area that starts at base_vaddr
27 list_foreach(vmap_t, vmap, mmctx->mmaps)
28 {
29 const ptr_t this_vaddr = vmap->vaddr;
30 const ptr_t this_end_vaddr = this_vaddr + vmap->npages * MOS_PAGE_SIZE;
31
32 // see if this vmap overlaps with the area we want to allocate
33 if (this_vaddr < end_vaddr && this_end_vaddr > base_vaddr)
34 {
35 // this mmap overlaps with the area we want to allocate
36 // so we can't allocate here
37 return -ENOMEM;
38 }
39 }
40
41 if (end_vaddr > MOS_USER_END_VADDR)
42 return -ENOMEM;
43
44 // nothing seems to overlap
45 return vmap_create(mmctx, base_vaddr, n_pages);
46 }
47 else
48 {
49 ptr_t retry_addr = base_vaddr;
50 list_foreach(vmap_t, mmap, mmctx->mmaps)
51 {
52 // we've reached the end of the user address space?
53 if (retry_addr + n_pages * MOS_PAGE_SIZE > MOS_USER_END_VADDR)
54 return -ENOMEM;
55
56 const ptr_t this_vaddr = mmap->vaddr;
57 const ptr_t this_end_vaddr = this_vaddr + mmap->npages * MOS_PAGE_SIZE;
58
59 const ptr_t target_vaddr_end = retry_addr + n_pages * MOS_PAGE_SIZE;
60 if (this_vaddr < target_vaddr_end && this_end_vaddr > retry_addr)
61 {
62 // this mmap overlaps with the area we want to allocate
63 // so we can't allocate here
64 retry_addr = this_end_vaddr; // try the next area
65 }
66
67 if (retry_addr + n_pages * MOS_PAGE_SIZE <= this_vaddr)
68 {
69 // we've found a free area that is large enough
70 return vmap_create(mmctx, retry_addr, n_pages);
71 }
72 }
73
74 // we've reached the end of the list, no matter it's empty or not
75 if (retry_addr + n_pages * MOS_PAGE_SIZE <= MOS_USER_END_VADDR)
76 return vmap_create(mmctx, retry_addr, n_pages);
77
78 return -ENOMEM;
79 }
80}
81
/// Map npages physical frames starting at pfn into the kernel page tables
/// (mmctx->pgd) at vaddr with the given protection flags.
/// NOTE(review): source lines 84, 86 and 89 are missing from this rendering —
/// presumably a lock acquire/release pair around the mapping; verify against
/// the repository before relying on this function's locking contract.
82void mm_map_kernel_pages(MMContext *mmctx, ptr_t vaddr, pfn_t pfn, size_t npages, vm_flags flags)
83{
85 MOS_ASSERT(npages > 0);
87 pr_dinfo2(vmm, "mapping %zd pages at " PTR_FMT " to pfn " PFN_FMT, npages, vaddr, pfn);
88 mm_do_map(mmctx->pgd, vaddr, pfn, npages, flags, false); // do_refcount=false: caller manages frame refcounts
90}
91
/// Allocate npages of free user address space (at/near vaddr, per vaflags) in
/// mmctx and map them to the physical frames starting at pfn. Returns the new
/// vmap on success, or -ENOMEM if no suitable address range was found.
/// NOTE(review): source line 95 is missing from this rendering — both exit
/// paths below release mmctx->mm_lock, so line 95 is presumably
/// spinlock_acquire(&mmctx->mm_lock); verify against the repository.
92PtrResult<vmap_t> mm_map_user_pages(MMContext *mmctx, ptr_t vaddr, pfn_t pfn, size_t npages, vm_flags flags, valloc_flags vaflags, vmap_type_t type,
93 vmap_content_t content)
94{
96 auto vmap = mm_get_free_vaddr_locked(mmctx, npages, vaddr, vaflags);
97 if (unlikely(vmap.isErr()))
98 {
99 mos_warn("could not find %zd pages in the address space", npages);
100 spinlock_release(&mmctx->mm_lock);
101 return -ENOMEM; // NOTE(review): the specific error in vmap is discarded here
102 }
103
104 pr_dinfo2(vmm, "mapping %zd pages at " PTR_FMT " to pfn " PFN_FMT, npages, vmap->vaddr, pfn);
105 vmap->vmflags = flags;
106 vmap->stat.regular = npages; // account all pages as regular memory
107 mm_do_map(mmctx->pgd, vmap->vaddr, pfn, npages, flags, false); // do_refcount=false
108 spinlock_release(&mmctx->mm_lock);
109 vmap_finalise_init(&*vmap, content, type); // after the lock is dropped
110 return vmap;
111}
112
/// Replace the frame mapped at vaddr with physical frame pfn, keeping the
/// physical-frame refcounts balanced (unref the old frame, ref the new one).
/// NOTE(review): the signature line (113) is absent from this rendering; the
/// declaration index gives it as
///   void mm_replace_page_locked(MMContext *ctx, ptr_t vaddr, pfn_t pfn, vm_flags flags)
114{
115 vaddr = ALIGN_DOWN_TO_PAGE(vaddr);
116 pr_dinfo2(vmm, "filling page at " PTR_FMT " with " PFN_FMT, vaddr, pfn);
117
118 const pfn_t old_pfn = mm_do_get_pfn(ctx->pgd, vaddr);
119
120 if (unlikely(old_pfn == pfn))
121 {
122 mos_panic("trying to replace page at " PTR_FMT " with the same page " PFN_FMT, vaddr, pfn);
123 return; // unreachable if mos_panic does not return
124 }
125
126 if (likely(old_pfn)) // assumes pfn 0 means "not mapped" — TODO confirm
127 pmm_unref_one(old_pfn); // unmapped
128
129 pmm_ref_one(pfn); // take the reference before installing the mapping
130 mm_do_map(ctx->pgd, vaddr, pfn, 1, flags, false);
131}
132
/// Clone src_vmap into dst_ctx at the same virtual address: copy the page
/// table entries and mirror the vmap metadata. Returns the new vmap.
/// NOTE(review): the signature line (133) is absent from this rendering; the
/// declaration index gives it as
///   PtrResult<vmap_t> mm_clone_vmap_locked(vmap_t *src_vmap, MMContext *dst_ctx)
134{
135 auto dst_vmap = mm_get_free_vaddr_locked(dst_ctx, src_vmap->npages, src_vmap->vaddr, VALLOC_EXACT); // must land at the same vaddr
136
137 if (unlikely(dst_vmap.isErr()))
138 {
139 mos_warn("could not find %zd pages in the address space", src_vmap->npages);
140 return nullptr; // NOTE(review): sibling functions return -ENOMEM here — confirm PtrResult treats nullptr as an error
141 }
142
143 pr_dinfo2(vmm, "copying mapping from " PTR_FMT ", %zu pages", src_vmap->vaddr, src_vmap->npages);
144 mm_do_copy(src_vmap->mmctx->pgd, dst_vmap->mmctx->pgd, src_vmap->vaddr, src_vmap->npages);
145
// mirror the bookkeeping fields of the source vmap
146 dst_vmap->vmflags = src_vmap->vmflags;
147 dst_vmap->io = src_vmap->io;
148 dst_vmap->io_offset = src_vmap->io_offset;
149 dst_vmap->content = src_vmap->content;
150 dst_vmap->type = src_vmap->type;
151 dst_vmap->stat = src_vmap->stat;
152 dst_vmap->on_fault = src_vmap->on_fault;
153
154 if (src_vmap->io)
155 io_ref(src_vmap->io); // both vmaps now hold a reference to the backing io
156
157 return dst_vmap;
158}
159
/// Return true iff vaddr falls inside one of mmctx's vmaps.
/// Note this consults the vmap bookkeeping list only, not the hardware page
/// tables (contrast with mm_do_get_pfn).
/// NOTE(review): lines 160 and 162 are absent from this rendering; the index
/// gives the signature as
///   bool mm_get_is_mapped_locked(MMContext *mmctx, ptr_t vaddr)
/// and line 162 is presumably a lock-held assertion — verify.
161{
163 list_foreach(vmap_t, vmap, mmctx->mmaps)
164 {
165 if (vmap->vaddr <= vaddr && vaddr < vmap->vaddr + vmap->npages * MOS_PAGE_SIZE) // half-open [vaddr, vaddr+len)
166 return true;
167 }
168
169 return false;
170}
171
/// Update the protection flags of npages pages starting at vaddr in ctx's
/// page tables.
/// NOTE(review): source line 175 is absent from this rendering — presumably a
/// spinlock_is_locked assertion (the _locked suffix implies the caller holds
/// ctx->mm_lock); verify against the repository.
172void mm_flag_pages_locked(MMContext *ctx, ptr_t vaddr, size_t npages, vm_flags flags)
173{
174 MOS_ASSERT(npages > 0);
176 pr_dinfo2(vmm, "flagging %zd pages at " PTR_FMT " with flags %x", npages, vaddr, flags);
177 mm_do_flag(ctx->pgd, vaddr, npages, flags);
178}
179
/// Translate a virtual address to a physical address: look up the frame
/// number in the page tables, shift it into place, and add the in-page offset.
/// NOTE(review): the signature line (180) is absent from this rendering; the
/// declaration index gives it as
///   ptr_t mm_get_phys_addr(MMContext *ctx, ptr_t vaddr)
/// Behaviour for an unmapped vaddr depends on mm_do_get_pfn — presumably
/// yields 0 + offset; confirm against table_ops.cpp.
181{
182 pfn_t pfn = mm_do_get_pfn(ctx->pgd, vaddr);
183 return pfn << PML1_SHIFT | (vaddr % MOS_PAGE_SIZE); // << binds tighter than |, i.e. (pfn << PML1_SHIFT) | offset
184}
#define MOS_ASSERT_X(cond, msg,...)
Definition assert.hpp:15
#define MOS_ASSERT(cond)
Definition assert.hpp:14
#define mos_warn(fmt,...)
Definition assert.hpp:23
#define MOS_PAGE_SIZE
Definition autoconf.h:6
#define list_foreach(t, v, h)
Iterate over a list.
Definition list.hpp:89
void vmap_finalise_init(vmap_t *vmap, vmap_content_t content, vmap_type_t type)
Finalize the initialization of a vmap object.
Definition mm.cpp:250
vmap_type_t
Definition mm.hpp:31
vmap_t * vmap_create(MMContext *mmctx, ptr_t vaddr, size_t npages)
Create a vmap object and insert it into the address space.
Definition mm.cpp:152
vmap_content_t
Definition mm.hpp:22
valloc_flags
Definition paging.hpp:20
void mm_map_kernel_pages(MMContext *mmctx, ptr_t vaddr, pfn_t pfn, size_t npages, vm_flags flags)
Map a block of virtual memory to a block of physical memory.
Definition paging.cpp:82
bool mm_get_is_mapped_locked(MMContext *mmctx, ptr_t vaddr)
Get if a virtual address is mapped in a page table.
Definition paging.cpp:160
PtrResult< vmap_t > mm_map_user_pages(MMContext *mmctx, ptr_t vaddr, pfn_t pfn, size_t npages, vm_flags flags, valloc_flags vaflags, vmap_type_t type, vmap_content_t content)
Definition paging.cpp:92
void mm_replace_page_locked(MMContext *ctx, ptr_t vaddr, pfn_t pfn, vm_flags flags)
Replace the mappings of a page with a new physical frame.
Definition paging.cpp:113
ptr_t mm_get_phys_addr(MMContext *ctx, ptr_t vaddr)
Definition paging.cpp:180
void mm_flag_pages_locked(MMContext *ctx, ptr_t vaddr, size_t npages, vm_flags flags)
Update the flags of a block of virtual memory.
Definition paging.cpp:172
PtrResult< vmap_t > mm_clone_vmap_locked(vmap_t *src_vmap, MMContext *dst_ctx)
Remap a block of virtual memory from one page table to another, i.e. copy the mappings.
Definition paging.cpp:133
PtrResult< vmap_t > mm_get_free_vaddr_locked(MMContext *mmctx, size_t n_pages, ptr_t base_vaddr, valloc_flags flags)
Gets npages unmapped free pages from a page table.
Definition paging.cpp:18
@ VALLOC_EXACT
Allocate pages at the exact address.
Definition paging.hpp:22
#define pmm_ref_one(thing)
Definition pmm.hpp:159
#define pmm_unref_one(thing)
Definition pmm.hpp:160
io_t * io_ref(io_t *io)
Definition io.cpp:74
#define likely(x)
Definition mos_global.h:39
#define ALIGN_DOWN_TO_PAGE(addr)
Definition mos_global.h:77
#define unlikely(x)
Definition mos_global.h:40
#define mos_panic(fmt,...)
Definition panic.hpp:51
vm_flags
Definition platform.hpp:42
#define pr_dinfo2(feat, fmt,...)
Definition printk.hpp:27
#define MOS_KERNEL_START_VADDR
#define MOS_USER_END_VADDR
#define PML1_SHIFT
should_inline bool spinlock_is_locked(const spinlock_t *lock)
Definition spinlock.hpp:71
#define spinlock_acquire(lock)
Definition spinlock.hpp:64
#define spinlock_release(lock)
Definition spinlock.hpp:65
spinlock_t mm_lock
protects [pgd] and the [mmaps] list (the list itself, not the vmap_t objects)
Definition platform.hpp:86
list_head mmaps
Definition platform.hpp:88
pgd_t pgd
Definition platform.hpp:87
Definition mm.hpp:59
vmap_content_t content
Definition mm.hpp:71
ptr_t vaddr
Definition mm.hpp:63
size_t npages
Definition mm.hpp:64
vmfault_handler_t on_fault
Definition mm.hpp:74
vm_flags vmflags
Definition mm.hpp:65
vmap_stat_t stat
Definition mm.hpp:73
io_t * io
Definition mm.hpp:68
MMContext * mmctx
Definition mm.hpp:66
off_t io_offset
Definition mm.hpp:69
vmap_type_t type
Definition mm.hpp:72
void mm_do_map(pgd_t top, ptr_t vaddr, pfn_t pfn, size_t n_pages, vm_flags flags, bool do_refcount)
Definition table_ops.cpp:23
void mm_do_copy(pgd_t src, pgd_t dst, ptr_t vaddr, size_t n_pages)
Definition table_ops.cpp:57
pfn_t mm_do_get_pfn(pgd_t top, ptr_t vaddr)
Definition table_ops.cpp:67
void mm_do_flag(pgd_t top, ptr_t vaddr, size_t n_pages, vm_flags flags)
Definition table_ops.cpp:29
unsigned long long pfn_t
Definition types.h:37
#define PFN_FMT
Definition types.h:38
#define PTR_FMT
Definition types.h:29
unsigned long ptr_t
Definition types.h:21