// SPDX-License-Identifier: GPL-3.0-or-later

#include "mos/mm/slab.hpp"

#include "mos/assert.hpp"
#include "mos/filesystem/sysfs/sysfs.hpp"
#include "mos/misc/setup.hpp"
#include "mos/mm/mm.hpp"

#include <algorithm>
#include <mos/allocator.hpp>
#include <mos/lib/structures/list.hpp>
#include <mos/lib/sync/spinlock.hpp>
#include <mos/mos_global.h>
#include <mos_stdlib.hpp>
#include <mos_string.hpp>

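// Each page handed to a kmemcache begins with a slab_header_t, so that a freed
// object can be mapped back to its owning slab_t by rounding its address down
// to the page boundary (see kmemcache_free / slab_free).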
struct slab_header_t
{
    slab_t *slab;
};

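// Allocations too large for any builtin slab are served as raw pages; the page
// immediately before the returned address holds this metadata so that
// slab_free and slab_realloc can recover the request size and page count.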
struct slab_metadata_t
{
    size_t pages;
    size_t size;
};

// Larger slab sizes are not needed; larger allocations are served directly by allocating whole pages.
static const struct
{
    size_t size;
    const char *name;
} BUILTIN_SLAB_SIZES[] = {
    { .size = 4, .name = "builtin-4" },     { .size = 8, .name = "builtin-8" },     { .size = 16, .name = "builtin-16" },   { .size = 24, .name = "builtin-24" },   //
    { .size = 32, .name = "builtin-32" },   { .size = 48, .name = "builtin-48" },   { .size = 64, .name = "builtin-64" },   { .size = 96, .name = "builtin-96" },   //
    { .size = 128, .name = "builtin-128" }, { .size = 256, .name = "builtin-256" }, { .size = 384, .name = "builtin-384" }, { .size = 512, .name = "builtin-512" }, //
    { .size = 1024, .name = "builtin-1024" },
};

static slab_t slabs[MOS_ARRAY_SIZE(BUILTIN_SLAB_SIZES)];
static list_head slabs_list;

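// Pick the smallest builtin slab whose entry size can hold `size`, or NULL if
// the request is larger than every builtin slab (the caller then falls back to
// whole-page allocation).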
static inline slab_t *slab_for(size_t size)
{
    for (size_t i = 0; i < MOS_ARRAY_SIZE(slabs); i++)
    {
        slab_t *slab = &slabs[i];
        if (slab->ent_size >= size)
            return slab;
    }
    return NULL;
}

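// Backend helpers: grab/release `n` contiguous physical pages and account them
// under MEM_SLAB. phyframe_va()/va_phyframe() are presumed to translate between
// a physical frame and its direct-mapped kernel virtual address.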
static ptr_t slab_impl_new_page(size_t n)
{
    phyframe_t *pages = mm_get_free_pages(n);
    if (pages == nullptr)
        return 0;
    mmstat_inc(MEM_SLAB, n);
    return phyframe_va(pages);
}

static void slab_impl_free_page(ptr_t page, size_t n)
{
    mmstat_dec(MEM_SLAB, n);
    mm_free_pages(va_phyframe(page), n);
}

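// Carve a fresh page into a free list for slab `s`:
//   - a slab_header_t at the start of the page records the owning slab,
//   - the remainder is divided into ent_size-sized entries,
//   - each free entry stores, in its first word, the address of the next free
//     entry; the last one stores NULL.
// first_free then points at the first usable entry.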
static void slab_allocate_mem(slab_t *s)
{
    dInfo2<slab> << "renew slab for '" << s->name << "' with " << s->ent_size << " bytes";
    s->first_free = slab_impl_new_page(1);
    if (unlikely(!s->first_free))
    {
        mos_panic("slab: failed to allocate memory for slab");
        return;
    }

    const size_t header_offset = ALIGN_UP(sizeof(slab_header_t), s->ent_size);
    const size_t available_size = MOS_PAGE_SIZE - header_offset;

    slab_header_t *const slab_ptr = (slab_header_t *) s->first_free;
    slab_ptr->slab = s;
    dInfo2<slab> << "slab header is at " << (void *) slab_ptr;
    s->first_free = (ptr_t) s->first_free + header_offset;

    void **arr = (void **) s->first_free;
    const size_t max_n = available_size / s->ent_size - 1;
    const size_t fact = s->ent_size / sizeof(void *);

    for (size_t i = 0; i < max_n; i++)
    {
        arr[i * fact] = &arr[(i + 1) * fact];
    }
    arr[max_n * fact] = NULL;
}

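// Set up one builtin slab: link it into slabs_list and initialise its fields.
// No backing page is allocated here; that happens lazily on the first
// kmemcache_alloc() that finds first_free == 0.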
static void slab_init_one(slab_t *slab, const char *name, size_t size)
{
    MOS_ASSERT_X(size < MOS_PAGE_SIZE, "current slab implementation does not support slabs larger than a page, %zu bytes requested", size);
    linked_list_init(list_node(slab));
    list_node_append(&slabs_list, list_node(slab));
    slab->lock = SPINLOCK_INIT;
    slab->first_free = 0;
    slab->nobjs = 0;
    slab->name = name;
    slab->type_name = "<unsure>";
    slab->ent_size = size;
}

void slab_init(void)
{
    dInfo2<slab> << "initializing the slab allocator";
    for (size_t i = 0; i < MOS_ARRAY_SIZE(BUILTIN_SLAB_SIZES); i++)
        slab_init_one(&slabs[i], BUILTIN_SLAB_SIZES[i].name, BUILTIN_SLAB_SIZES[i].size);
}

void slab_register(slab_t *s)
{
    dInfo2<slab> << "registering slab for '" << s->name << "' with " << s->ent_size << " bytes";
    linked_list_init(list_node(s));
    list_node_append(&slabs_list, list_node(s));
}

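// Generic allocation entry point. Small requests go through the best-fitting
// builtin kmemcache; anything larger is rounded up to whole pages, with one
// extra page prepended to hold the slab_metadata_t, and the address right
// after that metadata page is returned.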
void *slab_alloc(size_t size)
{
    slab_t *const slab = slab_for(size);
    if (likely(slab))
        return kmemcache_alloc(slab);

    const size_t page_count = ALIGN_UP_TO_PAGE(size) / MOS_PAGE_SIZE;
    const ptr_t ret = slab_impl_new_page(page_count + 1);
    if (!ret)
        return NULL;

    slab_metadata_t *metadata = (slab_metadata_t *) ret;
    metadata->pages = page_count;
    metadata->size = size;

    return (void *) ((ptr_t) ret + MOS_PAGE_SIZE);
}

void *slab_calloc(size_t nmemb, size_t size)
{
    void *ptr = slab_alloc(nmemb * size);
    if (!ptr)
        return NULL;

    memset(ptr, 0, nmemb * size);
    return ptr;
}

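// Reallocation distinguishes the two allocation kinds by alignment: page-aligned
// pointers came from the large-allocation path (metadata lives one page below),
// while unaligned pointers belong to a kmemcache (the owning slab is found via
// the slab_header_t at the start of the page). Shrinking, or growing within the
// existing capacity, returns the original pointer; otherwise the data is copied
// into a new allocation and the old one is released.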
void *slab_realloc(void *oldptr, size_t new_size)
{
    if (!oldptr)
        return slab_alloc(new_size);

    const ptr_t addr = (ptr_t) oldptr;
    if (is_aligned(addr, MOS_PAGE_SIZE))
    {
        slab_metadata_t *metadata = (slab_metadata_t *) (addr - MOS_PAGE_SIZE);
        if (ALIGN_UP_TO_PAGE(metadata->size) == ALIGN_UP_TO_PAGE(new_size))
        {
            metadata->size = new_size;
            return oldptr;
        }

        void *new_addr = slab_alloc(new_size);
        if (!new_addr)
            return NULL;

        memcpy(new_addr, oldptr, std::min(metadata->size, new_size));

        slab_free(oldptr);
        return new_addr;
    }

    slab_header_t *slab_header = (slab_header_t *) ALIGN_DOWN_TO_PAGE(addr);
    slab_t *slab = slab_header->slab;

    if (new_size > slab->ent_size)
    {
        void *new_addr = slab_alloc(new_size);
        if (!new_addr)
            return NULL;

        memcpy(new_addr, oldptr, slab->ent_size);
        kmemcache_free(slab, oldptr);
        return new_addr;
    }

    return oldptr;
}

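// Free either kind of allocation: page-aligned pointers are large allocations
// (release metadata->pages + 1 pages starting at the metadata page), anything
// else is returned to its owning kmemcache.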
void slab_free(const void *ptr)
{
    dInfo2<slab> << "freeing memory at " << ptr;
    if (!ptr)
        return;

    const ptr_t addr = (ptr_t) ptr;
    if (is_aligned(addr, MOS_PAGE_SIZE))
    {
        slab_metadata_t *metadata = (slab_metadata_t *) (addr - MOS_PAGE_SIZE);
        slab_impl_free_page((ptr_t) metadata, metadata->pages + 1);
        return;
    }

    const slab_header_t *header = (slab_header_t *) ALIGN_DOWN_TO_PAGE(addr);
    kmemcache_free(header->slab, ptr);
}

// ======================

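// Pop one entry off the slab's free list, refilling the list with a fresh page
// when it is empty. The returned memory is zeroed, and an assert guards against
// handing out an address outside kernel space.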
void *kmemcache_alloc(slab_t *s)
{
    MOS_ASSERT_X(s->ent_size > 0, "slab: invalid slab entry size %zu", s->ent_size);
    dInfo2<slab> << "allocating from slab '" << s->name << "'";
    spinlock_acquire(&s->lock);

    if (s->first_free == 0)
    {
        // free list exhausted (or never populated): renew the slab with a fresh page
        slab_allocate_mem(s);
    }

    ptr_t *alloc = (ptr_t *) s->first_free;
    dCont<slab> << " -> " << (void *) alloc;

    // sanity check: the entry must lie within kernel address space
    MOS_ASSERT_X((ptr_t) alloc >= MOS_KERNEL_START_VADDR, "slab: invalid memory address %p", (void *) alloc);

    s->first_free = *alloc; // the entry's first word holds the next free entry
    memset(alloc, 0, s->ent_size);

    s->nobjs++;
    spinlock_release(&s->lock);
    return alloc;
}

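// Return an entry to its slab by pushing it onto the head of the free list:
// the entry's first word is overwritten with the old first_free pointer.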
void kmemcache_free(slab_t *s, const void *addr)
{
    dInfo2<slab> << "freeing from slab '" << s->name << "'";
    if (!addr)
        return;

    spinlock_acquire(&s->lock);

    ptr_t *new_head = (ptr_t *) addr;
    *new_head = s->first_free;
    s->first_free = (ptr_t) new_head;
    s->nobjs--;

    spinlock_release(&s->lock);
}

// ! sysfs support

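// Dump a table of all registered slabs (name, entry size, current free-list
// head, object count and type name) through the sysfs root file "slabinfo".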
static bool slab_sysfs_slabinfo(sysfs_file_t *f)
{
    sysfs_printf(f, "%20s \t%-10s %-18s \t%-8s %s\n\n", "", "Size", "First Free", "Objects", "Type Name");
    list_foreach(slab_t, slab, slabs_list)
    {
        sysfs_printf(f, "%20s:\t%-10zu " PTR_FMT " \t%-8zu %.*s\n", //
                     slab->name.data(),                             //
                     slab->ent_size,                                //
                     slab->first_free,                              //
                     slab->nobjs,                                   //
                     (int) slab->type_name.size(),                  //
                     slab->type_name.data()                         //
        );
    }

    return true;
}

MOS_INIT(SYSFS, slab_sysfs_init)
{
    static sysfs_item_t slabinfo = SYSFS_RO_ITEM("slabinfo", slab_sysfs_slabinfo);
    sysfs_register_root_file(&slabinfo);
}