// SPDX-License-Identifier: GPL-3.0-or-later

#include "mos/filesystem/page_cache.h"

#include "mos/filesystem/vfs_types.h"
#include "mos/filesystem/vfs_utils.h"
#include "mos/mm/mm.h"
#include "mos/mm/mmstat.h"
#include "mos/mm/physical/pmm.h"
#include "mos/syslog/printk.h"

#include <mos/mos_global.h>
#include <mos_stdlib.h>
#include <mos_string.h>

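// context passed to do_flush_and_drop_cached_page() through hashmap_foreach():
// the cache being flushed, whether pages should also be dropped, and the last return value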
struct _flush_and_drop_data
{
    inode_cache_t *icache;
    bool should_drop_page;
    long ret;
};

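// hashmap_foreach() callback: flush one cached page back to the underlying filesystem
// and, if requested, remove it from the page cache and drop its reference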
static bool do_flush_and_drop_cached_page(const uintn key, void *value, void *data)
{
    // key = page number, value = phyframe_t *, data = _flush_and_drop_data *
    struct _flush_and_drop_data *const fdd = data;
    inode_cache_t *const icache = fdd->icache;
    phyframe_t *const page = value;
    off_t const pgoff = key;
    const bool drop_page = fdd->should_drop_page;

    // !! TODO
    // !! we currently have no reliable way to track whether a page is dirty,
    // !! so we always flush it; this hurts performance, but it is simple and works for now
    // if (!page->pagecache.dirty)

    long ret = 0;
    if (icache->ops->flush_page)
        ret = icache->ops->flush_page(icache, pgoff, page);
    else
        ret = simple_flush_page_discard_data(icache, pgoff, page);

    if (!IS_ERR_VALUE(ret) && drop_page)
    {
        // only drop the page if it was flushed successfully
        hashmap_remove(&icache->pages, pgoff);
        mmstat_dec1(MEM_PAGECACHE);
        pmm_unref_one(page);
    }

    fdd->ret = ret;
    return ret == 0;
}

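// flush (and optionally drop) up to npages cached pages starting at pgoff,
// stopping at the first page that fails to flush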
long pagecache_flush_or_drop(inode_cache_t *icache, off_t pgoff, size_t npages, bool drop_page)
{
    struct _flush_and_drop_data data = { .icache = icache, .should_drop_page = drop_page };

    long ret = 0;
    for (size_t i = 0; i < npages; i++)
    {
        phyframe_t *page = hashmap_get(&icache->pages, pgoff + i);
        if (!page)
            continue;

        do_flush_and_drop_cached_page(pgoff + i, page, &data);

        if (data.ret != 0)
        {
            ret = data.ret;
            break;
        }
    }
    return ret;
}

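// flush (and optionally drop) every page currently held in the inode's page cache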
long pagecache_flush_or_drop_all(inode_cache_t *icache, bool drop_page)
{
    struct _flush_and_drop_data data = { .icache = icache, .should_drop_page = drop_page };
    hashmap_foreach(&icache->pages, do_flush_and_drop_cached_page, &data);
    return data.ret;
}

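// look up the page at pgoff in the cache; on a miss, ask the filesystem to
// fill it via ops->fill_cache and insert the new page into the cache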
phyframe_t *pagecache_get_page_for_read(inode_cache_t *cache, off_t pgoff)
{
    phyframe_t *page = hashmap_get(&cache->pages, pgoff);
    if (page)
        return page; // fast path

    if (!cache->ops)
        return ERR_PTR(-EIO);

    MOS_ASSERT_X(cache->ops && cache->ops->fill_cache, "no page cache ops for inode %p", (void *) cache->owner);
    page = cache->ops->fill_cache(cache, pgoff);
    if (IS_ERR(page))
        return page;
    mmstat_inc1(MEM_PAGECACHE);
    MOS_ASSERT(hashmap_put(&cache->pages, pgoff, page) == NULL);
    return page;
}

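// currently identical to the read path: without dirty tracking (see the TODO above),
// a page obtained for writing is fetched and cached the same way as one for reading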
phyframe_t *pagecache_get_page_for_write(inode_cache_t *cache, off_t pgoff)
{
    return pagecache_get_page_for_read(cache, pgoff);
}

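// copy size bytes at offset from the inode's page cache into buf, page by page;
// returns the number of bytes read, or a negative error code if a page cannot be filled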
ssize_t vfs_read_pagecache(inode_cache_t *icache, void *buf, size_t size, off_t offset)
{
    mutex_acquire(&icache->lock);
    size_t bytes_read = 0;
    size_t bytes_left = size;
    while (bytes_left > 0)
    {
        // bytes to copy from the current page, clamped to the page boundary and to what remains
        const size_t inpage_offset = offset % MOS_PAGE_SIZE;
        const size_t inpage_size = MIN(MOS_PAGE_SIZE - inpage_offset, bytes_left);

        phyframe_t *page = pagecache_get_page_for_read(icache, offset / MOS_PAGE_SIZE); // the page backing this offset
        if (IS_ERR(page))
        {
            mutex_release(&icache->lock);
            return PTR_ERR(page);
        }

        memcpy((char *) buf + bytes_read, (void *) (phyframe_va(page) + inpage_offset), inpage_size);

        bytes_read += inpage_size;
        bytes_left -= inpage_size;
        offset += inpage_size;
    }

    mutex_release(&icache->lock);
    return bytes_read;
}

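// copy total_size bytes from buf into the inode's page cache starting at offset,
// going through the filesystem's page_write_begin/page_write_end hooks for each page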
ssize_t vfs_write_pagecache(inode_cache_t *icache, const void *buf, size_t total_size, off_t offset)
{
    const inode_cache_ops_t *ops = icache->ops;
    MOS_ASSERT_X(ops, "no page cache ops for inode %p", (void *) icache->owner);

    mutex_acquire(&icache->lock);

    size_t bytes_written = 0;
    size_t bytes_left = total_size;
    while (bytes_left > 0)
    {
        // bytes to copy into the current page, clamped to the page boundary and to what remains
        const size_t inpage_offset = offset % MOS_PAGE_SIZE;
        const size_t inpage_size = MIN(MOS_PAGE_SIZE - inpage_offset, bytes_left);

        void *private;
        phyframe_t *page;
        const bool can_write = ops->page_write_begin(icache, offset, inpage_size, &page, &private);
        if (!can_write)
        {
            pr_warn("page_write_begin failed");
            mutex_release(&icache->lock);
            return -EIO;
        }

        memcpy((char *) (phyframe_va(page) + inpage_offset), (char *) buf + bytes_written, inpage_size);
        ops->page_write_end(icache, offset, inpage_size, page, private);

        bytes_written += inpage_size;
        bytes_left -= inpage_size;
        offset += inpage_size;
    }

    mutex_release(&icache->lock);
    return bytes_written;
}