// SPDX-License-Identifier: GPL-3.0-or-later

#pragma once

#include "mos/io/io.hpp"
#include "mos/mm/mmstat.hpp"
#include "mos/mm/physical/pmm.hpp"
#include "mos/platform/platform.hpp"

#include <mos/allocator.hpp>
#include <mos/lib/structures/list.hpp>
#include <mos/lib/sync/spinlock.hpp>
#include <mos/mm/mm_types.h>

/**
 * @defgroup mm Memory Management
 * @brief Memory management functions and structures.
 * @{
 */

typedef enum
{
    VMAP_UNKNOWN = 0,
    VMAP_STACK, // stack (user)
    VMAP_FILE,  // file mapping
    VMAP_MMAP,  // mmap mapping
    VMAP_DMA,   // DMA mapping
} vmap_content_t;

typedef enum
{
    VMAP_TYPE_PRIVATE = MMAP_PRIVATE, // the child process gets its own distinct copy of the memory region
    VMAP_TYPE_SHARED = MMAP_SHARED,   // the memory region is shared between the parent and child processes
} vmap_type_t;

typedef struct
{
    bool is_present, is_write, is_user, is_exec;
    ptr_t ip;                       ///< the instruction pointer which caused the fault
    platform_regs_t *regs;          ///< the registers at the moment the fault occurred
    phyframe_t *faulting_page;      ///< the frame that contains the copy-on-write data (if any)
    const phyframe_t *backing_page; ///< the frame that contains the data for this page; the on_fault handler should set this
} pagefault_t;

typedef enum
{
    VMFAULT_COMPLETE,             ///< no further action is needed, the page is correctly mapped now
    VMFAULT_MAP_BACKING_PAGE_RO,  ///< the caller should map the backing page at the faulting address and mark it non-writable
    VMFAULT_MAP_BACKING_PAGE,     ///< the caller should map the backing page at the faulting address
    VMFAULT_COPY_BACKING_PAGE,    ///< the caller should copy the backing page to the faulting address
    VMFAULT_CANNOT_HANDLE = 0xff, ///< the handler cannot handle this fault
} vmfault_result_t;

struct vmap_t;

typedef vmfault_result_t (*vmfault_handler_t)(vmap_t *vmap, ptr_t fault_addr, pagefault_t *info);
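
/*
 * Example (illustrative sketch, not part of this header's API): a minimal on_fault
 * handler that demand-allocates a page. The handler name is hypothetical, and
 * mm_get_free_page() is declared further below in this header.
 *
 *     static vmfault_result_t demand_alloc_on_fault(vmap_t *vmap, ptr_t fault_addr, pagefault_t *info)
 *     {
 *         (void) vmap;
 *         (void) fault_addr;
 *         if (info->is_present)
 *             return VMFAULT_CANNOT_HANDLE;        // only demand-allocate on not-present faults
 *         info->backing_page = mm_get_free_page(); // hand a fresh frame back to the caller
 *         return info->backing_page ? VMFAULT_MAP_BACKING_PAGE : VMFAULT_CANNOT_HANDLE;
 *     }
 *
 * The generic fault path (see mm_handle_fault() below) is then expected to map
 * info->backing_page at the faulting address according to the returned value.
 */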

struct vmap_t : mos::NamedType<"VMap">
{
    as_linked_list;
    spinlock_t lock;

    ptr_t vaddr; // starting virtual address of the region
    size_t npages;
    vm_flags vmflags; // the expected flags for the region, regardless of the copy-on-write state
    MMContext *mmctx;

    io_t *io;        // the io object that (possibly) backs this vmap
    off_t io_offset; // the offset in the io object, page-aligned

    vmap_content_t content;
    vmap_type_t type;
    vmap_stat_t stat;
    vmfault_handler_t on_fault;
};

#define pfn_va(pfn) ((ptr_t) (platform_info->direct_map_base + (pfn) * (MOS_PAGE_SIZE)))
#define va_pfn(va) ((((ptr_t) (va)) - platform_info->direct_map_base) / MOS_PAGE_SIZE)
#define va_phyframe(va) (&phyframes[va_pfn(va)])
#define phyframe_va(frame) ((ptr_t) pfn_va(phyframe_pfn(frame)))
#define pa_va(pa) ((ptr_t) (pa) + platform_info->direct_map_base)

phyframe_t *mm_get_free_page(void);
phyframe_t *mm_get_free_page_raw(void);
phyframe_t *mm_get_free_pages(size_t npages);

#define mm_free_page(frame) pmm_free_frames(frame, 1)
#define mm_free_pages(frame, npages) pmm_free_frames(frame, npages)
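
/*
 * Example (illustrative sketch): allocating a frame and accessing it through the
 * direct map; the variable names are arbitrary, only the macros and functions above
 * are assumed.
 *
 *     phyframe_t *frame = mm_get_free_page();  // one free physical frame
 *     char *kva = (char *) phyframe_va(frame); // its direct-mapped virtual address
 *     kva[0] = 42;                             // write through the direct map
 *     // va_phyframe(kva) == frame, and va_pfn(kva) == phyframe_pfn(frame)
 *     mm_free_page(frame);                     // give the frame back to the PMM
 */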

/**
 * @brief Create a user-mode platform-dependent page table.
 * @return MMContext The created page table.
 * @note A platform-independent page-map is also created.
 */
MMContext *mm_create_context(void);

/**
 * @brief Destroy a user-mode platform-dependent page table.
 * @param table The page table to destroy.
 * @note The platform-independent page-map is also destroyed.
 */
void mm_destroy_context(MMContext *table);

/**
 * @brief Lock and unlock a pair of MMContext objects.
 *
 * @param ctx1 The first context
 * @param ctx2 The second context
 */
void mm_lock_ctx_pair(MMContext *ctx1, MMContext *ctx2);
void mm_unlock_ctx_pair(MMContext *ctx1, MMContext *ctx2);
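
/*
 * Example (illustrative sketch): the pair-locking helpers are intended for operations
 * that touch two address spaces at once, e.g. mirroring mappings from one context into
 * another; `src` and `dst` are hypothetical MMContext pointers.
 *
 *     mm_lock_ctx_pair(src, dst);
 *     // ... walk the vmaps of src and duplicate them into dst ...
 *     mm_unlock_ctx_pair(src, dst);
 */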

__nodiscard MMContext *mm_switch_context(MMContext *new_ctx);
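
/*
 * Example (illustrative sketch): the return value is presumably the previously active
 * context (hence the __nodiscard), so a caller that switches temporarily should keep it
 * and switch back; `target` is a hypothetical MMContext pointer.
 *
 *     MMContext *prev = mm_switch_context(target); // remember the old context
 *     // ... access memory that is only mapped in target ...
 *     prev = mm_switch_context(prev);              // restore the previous context
 */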

/**
 * @brief Create a vmap object and insert it into the address space.
 *
 * @param mmctx The address space
 * @param vaddr Starting virtual address of the region
 * @param npages Number of pages in the region
 * @return vmap_t* The created vmap object, with its lock held for further initialization.
 */
vmap_t *vmap_create(MMContext *mmctx, ptr_t vaddr, size_t npages);

/**
 * @brief Destroy a vmap object, and unmap the region.
 *
 * @param vmap The vmap object
 * @note The vmap object will be freed.
 */
void vmap_destroy(vmap_t *vmap);

/**
 * @brief Get the vmap object for a virtual address.
 *
 * @param mmctx The address space to search
 * @param vaddr The virtual address
 * @param out_offset An optional pointer to receive the offset of the address in the vmap
 * @return vmap_t* The vmap object with its lock held, or NULL if not found.
 */
vmap_t *vmap_obtain(MMContext *mmctx, ptr_t vaddr, size_t *out_offset);
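
/*
 * Example (illustrative sketch): looking up the vmap covering an address; `mmctx` and
 * `addr` are caller-provided, and spinlock_release() is assumed to be the unlock
 * primitive from <mos/lib/sync/spinlock.hpp>.
 *
 *     size_t pgoff = 0;
 *     vmap_t *vmap = vmap_obtain(mmctx, addr, &pgoff); // returned locked, or NULL
 *     if (vmap)
 *     {
 *         // ... inspect vmap->content, vmap->vmflags, vmap->npages ...
 *         spinlock_release(&vmap->lock);
 *     }
 */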

/**
 * @brief Split a vmap object into two, at the specified offset.
 *
 * @param vmap The vmap object
 * @param split The number of pages in the first vmap object
 * @return vmap_t* The second vmap object, with its lock held.
 */
vmap_t *vmap_split(vmap_t *vmap, size_t split);

/**
 * @brief Split a vmap to get a vmap object for a range of pages.
 *
 * @param vmap The vmap object to split
 * @param rstart_pgoff The starting page offset
 * @param rend_pgoff The ending page offset
 * @return vmap_t* The vmap object for the range, with its lock held.
 */
vmap_t *vmap_split_for_range(vmap_t *vmap, size_t rstart_pgoff, size_t rend_pgoff);
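
/*
 * Example (illustrative sketch): carving out a sub-range of an existing vmap, e.g. to
 * adjust only part of a mapping. VM_WRITE is a placeholder for a vm_flags bit from
 * <mos/mm/mm_types.h>, and spinlock_release() is assumed as the unlock primitive.
 *
 *     vmap_t *part = vmap_split_for_range(vmap, rstart_pgoff, rend_pgoff); // returned locked
 *     part->vmflags |= VM_WRITE; // placeholder: change the expected flags of this range only
 *     spinlock_release(&part->lock);
 */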

/**
 * @brief Finalize the initialization of a vmap object.
 *
 * @param vmap The vmap object
 * @param content The content type of the region, \see vmap_content_t
 * @param type The fork behavior of the region, \see vmap_type_t
 * @note The vmap object must be locked, and will be unlocked after this function returns.
 */
void vmap_finalise_init(vmap_t *vmap, vmap_content_t content, vmap_type_t type);
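
/*
 * Example (illustrative sketch): the intended initialisation flow as documented above;
 * vmap_create() returns the vmap locked, the caller fills in the remaining fields, and
 * vmap_finalise_init() unlocks it. The VM_* flags are placeholders for values from
 * <mos/mm/mm_types.h>.
 *
 *     vmap_t *vmap = vmap_create(mmctx, vaddr, npages);       // locked on return
 *     vmap->vmflags = VM_USER | VM_READ | VM_WRITE;            // placeholder flags
 *     vmap->on_fault = demand_alloc_on_fault;                  // e.g. the sketch near vmfault_handler_t
 *     vmap_finalise_init(vmap, VMAP_MMAP, VMAP_TYPE_PRIVATE);  // unlocks the vmap
 */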

/**
 * @brief Helper function to resolve a copy-on-write fault.
 *
 * @param vmap The vmap object
 * @param fault_addr The fault address
 * @param info The page fault info
 * @return vmfault_result_t Always VMFAULT_COMPLETE
 */
[[nodiscard("resolve completed")]] vmfault_result_t mm_resolve_cow_fault(vmap_t *vmap, ptr_t fault_addr, pagefault_t *info);
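
/*
 * Example (illustrative sketch): an on_fault handler can delegate copy-on-write faults
 * to this helper and deal with everything else itself; the handler name and the
 * surrounding policy are hypothetical.
 *
 *     static vmfault_result_t anon_on_fault(vmap_t *vmap, ptr_t fault_addr, pagefault_t *info)
 *     {
 *         if (info->is_present && info->is_write)
 *             return mm_resolve_cow_fault(vmap, fault_addr, info); // write to a present page: CoW
 *         return VMFAULT_CANNOT_HANDLE;
 *     }
 */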

/**
 * @brief Handle a page fault
 *
 * @param fault_addr The fault address
 * @param info The page fault info
 */
void mm_handle_fault(ptr_t fault_addr, pagefault_t *info);

/** @} */