// SPDX-License-Identifier: GPL-3.0-or-later

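// Copy-on-write (CoW) and zero-on-demand (ZoD) support: vmap cloning with write
// protection, a lazily allocated shared zero page, and the page fault handler
// that resolves both kinds of faults.
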
#include "mos/mm/cow.hpp"

#include "mos/mm/mm.hpp"
#include "mos/mm/mmstat.hpp"
#include "mos/mm/paging/paging.hpp"
#include "mos/platform/platform.hpp"

#include <mos/interrupt/ipi.hpp>
#include <mos/mm/cow.hpp>
#include <mos/mm/paging/paging.hpp>
#include <mos/mm/physical/pmm.hpp>
#include <mos/platform/platform.hpp>
#include <mos/syslog/printk.hpp>
#include <mos/tasks/process.hpp>
#include <mos/tasks/task_types.hpp>
#include <mos/types.hpp>
#include <mos_string.hpp>

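// The shared zero page, allocated and zero-filled lazily on first use. ZoD read
// faults map this single page read-only, so untouched pages cost no private memory.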
static phyframe_t *_zero_page = NULL;
static phyframe_t *zero_page(void)
{
    if (unlikely(!_zero_page))
    {
        _zero_page = pmm_ref_one(mm_get_free_page());
        MOS_ASSERT(_zero_page);
        memzero((void *) phyframe_va(_zero_page), MOS_PAGE_SIZE);
    }

    return _zero_page;
}

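// Fault handler shared by CoW and ZoD mappings; it distinguishes three cases:
//   - present + write: a CoW page was written, resolve it into a private copy;
//   - non-present + write: a ZoD page was written, back it with a fresh page;
//   - non-present + read: back it read-only with the shared zero page.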
static vmfault_result_t cow_zod_fault_handler(vmap_t *vmap, ptr_t fault_addr, pagefault_t *info)
{
    if (info->is_present && info->is_write)
    {
        vmap_stat_dec(vmap, cow); // the faulting page is a CoW page
        vmap_stat_inc(vmap, regular);
        return mm_resolve_cow_fault(vmap, fault_addr, info);
    }

    MOS_ASSERT(!info->is_present); // we can't have (present && !write)

    if (info->is_write)
    {
        // non-present and write, must be a ZoD page
        info->backing_page = mm_get_free_page();
        vmap_stat_inc(vmap, regular);
        return VMFAULT_MAP_BACKING_PAGE;
    }
    else
    {
        // non-present and read: back it read-only with the shared zero page
        info->backing_page = zero_page();
        vmap_stat_inc(vmap, cow);
        return VMFAULT_MAP_BACKING_PAGE_RO;
    }
}

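// Clone src_vmap into target_mmctx with CoW semantics: write permission is removed
// from the source pages, its "regular" pages are reclassified as CoW, and both the
// source and destination vmaps resolve future faults through the CoW/ZoD handler.
// As the _locked suffix suggests, the caller is expected to hold the relevant mm locks.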
PtrResult<vmap_t> cow_clone_vmap_locked(MMContext *target_mmctx, vmap_t *src_vmap)
{
    // remove the VM_WRITE flag so future writes to the source fault and take the CoW path
    mm_flag_pages_locked(src_vmap->mmctx, src_vmap->vaddr, src_vmap->npages, src_vmap->vmflags & ~VM_WRITE);
    src_vmap->stat.cow += src_vmap->stat.regular;
    src_vmap->stat.regular = 0; // no longer private

    auto dst_vmap = mm_clone_vmap_locked(src_vmap, target_mmctx);
    if (dst_vmap.isErr())
        return dst_vmap.getErr();

    if (!src_vmap->on_fault)
        src_vmap->on_fault = cow_zod_fault_handler;

    dst_vmap->on_fault = src_vmap->on_fault;
    dst_vmap->stat = src_vmap->stat;
    return dst_vmap;
}

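// Reserve npages of virtual address space with no physical backing; pages are
// populated on demand by the fault handler above (the shared zero page for reads,
// a freshly allocated page on the first write).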
PtrResult<vmap_t> cow_allocate_zeroed_pages(MMContext *mmctx, size_t npages, ptr_t vaddr, valloc_flags allocflags, vm_flags flags)
{
    spinlock_acquire(&mmctx->mm_lock);
    auto vmap = mm_get_free_vaddr_locked(mmctx, npages, vaddr, allocflags);
    spinlock_release(&mmctx->mm_lock);

    if (vmap.isErr())
        return vmap;

    vmap->vmflags = flags;
    vmap->on_fault = cow_zod_fault_handler;
    return vmap;
}