1 | // SPDX-License-Identifier: GPL-3.0-or-later |
2 | |
3 | #pragma once |
4 | |
5 | #include "mos/io/io.h" |
6 | #include "mos/mm/mmstat.h" |
7 | #include "mos/mm/physical/pmm.h" |
8 | #include "mos/platform/platform.h" |
9 | |
10 | #include <mos/lib/structures/list.h> |
11 | #include <mos/lib/sync/spinlock.h> |
12 | #include <mos/mm/mm_types.h> |
13 | |
14 | /** |
15 | * @defgroup mm Memory Management |
16 | * @brief Memory management functions and structures. |
17 | * @{ |
18 | */ |
19 | |
/**
 * @brief The kind of content a virtual memory region (vmap) holds.
 */
typedef enum
{
    VMAP_UNKNOWN = 0, // content type not yet set (set later via vmap_finalise_init)
    VMAP_STACK,       // stack (user)
    VMAP_FILE,        // file mapping
    VMAP_MMAP,        // mmap mapping
    VMAP_DMA,         // DMA mapping
} vmap_content_t;
28 | |
/**
 * @brief Fork/sharing behaviour of a vmap; values mirror the MMAP_* flags.
 */
typedef enum
{
    VMAP_TYPE_PRIVATE = MMAP_PRIVATE, // there will be distinct copies of the memory region in the child process
    VMAP_TYPE_SHARED = MMAP_SHARED,   // the memory region will be shared between the parent and child processes
} vmap_type_t;
34 | |
/**
 * @brief Describes a single page fault, passed to per-vmap fault handlers.
 */
typedef struct
{
    bool is_present, is_write, is_user, is_exec; ///< decoded cause of the fault (page present / write access / user mode / instruction fetch)
    ptr_t ip;                                    ///< the instruction pointer which caused the fault
    platform_regs_t *regs;                       ///< the registers of the moment that caused the fault
    phyframe_t *faulting_page;                   ///< the frame that contains the copy-on-write data (if any)
    const phyframe_t *backing_page;              ///< the frame that contains the data for this page, the on_fault handler should set this
} pagefault_t;
43 | |
/**
 * @brief Action the page fault dispatcher should take after an on_fault handler returns.
 */
typedef enum
{
    VMFAULT_COMPLETE,             ///< no further action is needed, the page is correctly mapped now
    VMFAULT_MAP_BACKING_PAGE_RO,  ///< the caller should map the backing page into the faulting address, and mark it non-writable
    VMFAULT_MAP_BACKING_PAGE,     ///< the caller should map the backing page into the faulting address
    VMFAULT_COPY_BACKING_PAGE,    ///< the caller should copy the backing page into the faulting address
    VMFAULT_CANNOT_HANDLE = 0xff, ///< the handler cannot handle this fault
} vmfault_result_t;
52 | |
typedef struct _vmap vmap_t; // forward declaration, definition below

/**
 * @brief Per-vmap page fault handler.
 *
 * @param vmap The vmap the faulting address belongs to
 * @param fault_addr The faulting virtual address
 * @param info Fault details; the handler is expected to set info->backing_page
 *             (see pagefault_t) when returning a VMFAULT_*_BACKING_PAGE result
 * @return vmfault_result_t What the dispatcher should do next, \see vmfault_result_t
 */
typedef vmfault_result_t (*vmfault_handler_t)(vmap_t *vmap, ptr_t fault_addr, pagefault_t *info);
56 | |
/**
 * @brief A contiguous region of virtual memory within an address space.
 */
typedef struct _vmap
{
    as_linked_list;  // linked into the owning mm_context_t's vmap list
    spinlock_t lock; // protects this vmap; held by vmap_create/vmap_obtain/vmap_split on return

    ptr_t vaddr;       // virtual addresses (start of the region)
    size_t npages;     // length of the region, in pages
    vm_flags vmflags;  // the expected flags for the region, regardless of the copy-on-write state
    mm_context_t *mmctx; // the address space this vmap belongs to

    io_t *io;        // the io object that (possibly) backs this vmap
    off_t io_offset; // the offset in the io object, page-aligned

    vmap_content_t content;    // what this region holds, \see vmap_content_t
    vmap_type_t type;          // fork behaviour, \see vmap_type_t
    vmap_stat_t stat;          // per-vmap memory accounting statistics
    vmfault_handler_t on_fault; // page fault handler for this region (may be NULL)
} vmap_t;
75 | |
// Direct-map conversion helpers.
// These assume all physical memory is linearly mapped at
// platform_info->direct_map_base, so physical<->virtual translation is
// pure arithmetic. Only valid for direct-mapped addresses.
#define pfn_va(pfn) ((ptr_t) (platform_info->direct_map_base + (pfn) * (MOS_PAGE_SIZE)))       // physical frame number -> direct-map virtual address
#define va_pfn(va) ((((ptr_t) (va)) - platform_info->direct_map_base) / MOS_PAGE_SIZE)         // direct-map virtual address -> physical frame number
#define va_phyframe(va) (&phyframes[va_pfn(va)])                                               // direct-map virtual address -> phyframe_t descriptor
#define phyframe_va(frame) ((ptr_t) pfn_va(phyframe_pfn(frame)))                               // phyframe_t descriptor -> direct-map virtual address
#define pa_va(pa) ((ptr_t) (pa) + platform_info->direct_map_base)                              // physical address -> direct-map virtual address
81 | |
82 | __BEGIN_DECLS |
83 | |
/**
 * @brief Allocate a single free physical page frame.
 * @return phyframe_t* The allocated frame.
 * @note NOTE(review): the difference from mm_get_free_page_raw (presumably
 *       zeroing and/or accounting) is not visible in this header — confirm in mm.c.
 */
phyframe_t *mm_get_free_page(void);

/**
 * @brief Allocate a single free physical page frame, without the extra
 *        processing done by mm_get_free_page.
 * @return phyframe_t* The allocated frame.
 */
phyframe_t *mm_get_free_page_raw(void);

/**
 * @brief Allocate a contiguous run of free physical page frames.
 * @param npages Number of pages to allocate.
 * @return phyframe_t* The first frame of the run.
 */
phyframe_t *mm_get_free_pages(size_t npages);

// Release frames previously obtained from the mm_get_free_page* family.
#define mm_free_page(frame) pmm_free_frames(frame, 1)
#define mm_free_pages(frame, npages) pmm_free_frames(frame, npages)
90 | |
91 | /** |
92 | * @brief Create a user-mode platform-dependent page table. |
93 | * @return mm_context_t The created page table. |
94 | * @note A platform-independent page-map is also created. |
95 | */ |
96 | mm_context_t *mm_create_context(void); |
97 | |
98 | /** |
99 | * @brief Destroy a user-mode platform-dependent page table. |
100 | * @param table The page table to destroy. |
101 | * @note The platform-independent page-map is also destroyed. |
102 | */ |
103 | void mm_destroy_context(mm_context_t *table); |
104 | |
105 | /** |
106 | * @brief Lock and unlock a pair of mm_context_t objects. |
107 | * |
108 | * @param ctx1 The first context |
109 | * @param ctx2 The second context |
110 | */ |
111 | void mm_lock_ctx_pair(mm_context_t *ctx1, mm_context_t *ctx2); |
112 | void mm_unlock_ctx_pair(mm_context_t *ctx1, mm_context_t *ctx2); |
113 | |
114 | __nodiscard mm_context_t *mm_switch_context(mm_context_t *new_ctx); |
115 | |
116 | /** |
117 | * @brief Create a vmap object and insert it into the address space. |
118 | * |
119 | * @param mmctx The address space |
120 | * @param vaddr Starting virtual address of the region |
121 | * @param npages Number of pages in the region |
122 | * @return vmap_t* The created vmap object, with its lock held for further initialization. |
123 | */ |
124 | vmap_t *vmap_create(mm_context_t *mmctx, ptr_t vaddr, size_t npages); |
125 | |
126 | /** |
127 | * @brief Destroy a vmap object, and unmmap the region. |
128 | * |
129 | * @param vmap The vmap object |
130 | * @note The vmap object will be freed. |
131 | */ |
132 | void vmap_destroy(vmap_t *vmap); |
133 | |
134 | /** |
135 | * @brief Get the vmap object for a virtual address. |
136 | * |
137 | * @param mmctx The address space to search |
138 | * @param vaddr The virtual address |
139 | * @param out_offset An optional pointer to receive the offset of the address in the vmap |
140 | * @return vmap_t* The vmap object, or NULL if not found, with its lock held. |
141 | */ |
142 | vmap_t *vmap_obtain(mm_context_t *mmctx, ptr_t vaddr, size_t *out_offset); |
143 | |
144 | /** |
145 | * @brief Split a vmap object into two, at the specified offset. |
146 | * |
147 | * @param vmap The vmap object |
148 | * @param split The number of pages in the first vmap object |
149 | * @return vmap_t* The second vmap object, with its lock held. |
150 | */ |
151 | vmap_t *vmap_split(vmap_t *vmap, size_t split); |
152 | |
153 | /** |
154 | * @brief Split a vmap to get a vmap object for a range of pages. |
155 | * |
156 | * @param vmap The vmap object to split |
157 | * @param rstart_pgoff The starting page offset |
158 | * @param rend_pgoff The ending page offset |
159 | * @return vmap_t* The vmap object for the range, with its lock held. |
160 | */ |
161 | vmap_t *vmap_split_for_range(vmap_t *vmap, size_t rstart_pgoff, size_t rend_pgoff); |
162 | |
163 | /** |
164 | * @brief Finalize the initialization of a vmap object. |
165 | * |
166 | * @param vmap The vmap object |
167 | * @param content The content type of the region, \see vmap_content_t |
168 | * @param type The fork behavior of the region, \see vmap_type_t |
169 | * @note The vmap object must be locked, and will be unlocked after this function returns. |
170 | */ |
171 | void vmap_finalise_init(vmap_t *vmap, vmap_content_t content, vmap_type_t type); |
172 | |
173 | /** |
174 | * @brief Helper function to resolve a copy-on-write fault. |
175 | * |
176 | * @param vmap The vmap object |
177 | * @param fault_addr The fault address |
178 | * @param info The page fault info |
179 | * @return vmfault_result_t always VMFAULT_COMPLETE |
180 | */ |
181 | [[nodiscard("resolve completed" )]] vmfault_result_t mm_resolve_cow_fault(vmap_t *vmap, ptr_t fault_addr, pagefault_t *info); |
182 | |
183 | /** |
184 | * @brief Handle a page fault |
185 | * |
186 | * @param fault_addr The fault address |
187 | * @param info The page fault info |
188 | */ |
189 | void mm_handle_fault(ptr_t fault_addr, pagefault_t *info); |
190 | |
191 | __END_DECLS |
192 | |
193 | /** @} */ |
194 | |