1// SPDX-License-Identifier: GPL-3.0-or-later
2// SPDX-License-Identifier: BSD-2-Clause
3
4#include "mos/mm/slab.h"
5
6#include "mos/filesystem/sysfs/sysfs.h"
7#include "mos/misc/setup.h"
8#include "mos/mm/mm.h"
9#include "mos/syslog/printk.h"
10
11#include <mos/lib/structures/list.h>
12#include <mos/lib/sync/spinlock.h>
13#include <mos/mos_global.h>
14#include <mos_stdlib.h>
15#include <mos_string.h>
16
// Placed at the very start of every page owned by a slab; slab_free() and
// slab_realloc() recover it with ALIGN_DOWN_TO_PAGE(obj) to find the owning slab.
typedef struct
{
    slab_t *slab; // back-pointer to the slab this page belongs to
} slab_header_t;
21
// Bookkeeping for large (multi-page) allocations that bypass the slabs.
// Stored in a dedicated extra page immediately before the pointer returned
// to the caller (see slab_alloc / slab_free large-allocation paths).
typedef struct
{
    size_t pages; // number of data pages (excludes this metadata page)
    size_t size;  // exact byte size the caller requested
} slab_metadata_t;
27
// Built-in size classes for general-purpose kernel allocations.
// NOTE: must stay sorted ascending — slab_for() returns the first entry whose
// ent_size can hold the request, i.e. the smallest (best) fit.
static const struct
{
    size_t size;
    const char *name;
} BUILTIN_SLAB_SIZES[] = {
    { 4, "builtin-4" },     { 8, "builtin-8" },     { 16, "builtin-16" },   { 24, "builtin-24" },   //
    { 32, "builtin-32" },   { 48, "builtin-48" },   { 64, "builtin-64" },   { 96, "builtin-96" },   //
    { 128, "builtin-128" }, { 256, "builtin-256" }, { 384, "builtin-384" }, { 512, "builtin-512" }, //
    { 1024, "builtin-1024" },
    // larger slab sizes are not required
    // they can be allocated directly by allocating pages
};
40
// Bootstrap slab that backs slab_t descriptors themselves (used by kmemcache_create).
static slab_t slab_slab = { 0 };

// One slab per built-in size class, plus a list of ALL slabs (built-in and
// kmemcache-created) for the sysfs slabinfo dump.
static slab_t slabs[MOS_ARRAY_SIZE(BUILTIN_SLAB_SIZES)] = { 0 };
static list_head slabs_list = LIST_HEAD_INIT(slabs_list);
45
46static inline slab_t *slab_for(size_t size)
47{
48 for (size_t i = 0; i < MOS_ARRAY_SIZE(slabs); i++)
49 {
50 slab_t *slab = &slabs[i];
51 if (slab->ent_size >= size)
52 return slab;
53 }
54 return NULL;
55}
56
57static ptr_t slab_impl_new_page(size_t n)
58{
59 phyframe_t *pages = mm_get_free_pages(npages: n);
60 mmstat_inc(type: MEM_SLAB, size: n);
61 return phyframe_va(pages);
62}
63
64static void slab_impl_free_page(ptr_t page, size_t n)
65{
66 mmstat_dec(type: MEM_SLAB, size: n);
67 mm_free_pages(va_phyframe(page), n);
68}
69
70static void slab_init_one(slab_t *slab, const char *name, size_t size)
71{
72 MOS_ASSERT_X(size < MOS_PAGE_SIZE, "current slab implementation does not support slabs larger than a page, %zu bytes requested", size);
73 pr_dinfo2(slab, "slab: registering slab for '%s' with %zu bytes", name, size);
74 linked_list_init(list_node(slab));
75 list_node_append(head: &slabs_list, list_node(slab));
76 slab->lock = (spinlock_t) SPINLOCK_INIT;
77 slab->first_free = 0;
78 slab->nobjs = 0;
79 slab->name = name;
80 slab->ent_size = size;
81}
82
// Carve one fresh page into a singly-linked free list of ent_size objects
// and make it the slab's current free list. Each free object stores, in its
// first word, the address of the next free object (NULL terminates the list).
static void slab_allocate_mem(slab_t *slab)
{
    pr_dinfo2(slab, "renew slab for '%s' with %zu bytes", slab->name, slab->ent_size);
    slab->first_free = slab_impl_new_page(n: 1);
    if (unlikely(!slab->first_free))
    {
        mos_panic("slab: failed to allocate memory for slab");
        return; // unreachable: mos_panic does not return
    }

    // reserve room for the slab_header_t at the page start, rounded up to
    // ent_size so every object stays ent_size-aligned within the page
    const size_t header_offset = ALIGN_UP(sizeof(slab_header_t), slab->ent_size);
    const size_t available_size = MOS_PAGE_SIZE - header_offset;

    // back-pointer so slab_free() can find this slab from any object address
    slab_header_t *const slab_ptr = (slab_header_t *) slab->first_free;
    slab_ptr->slab = slab;
    pr_dinfo2(slab, "slab header is at %p", (void *) slab_ptr);
    slab->first_free = (ptr_t) slab->first_free + header_offset;

    // thread the free list through the page: arr is viewed as void* slots,
    // and consecutive objects are 'fact' slots (= ent_size bytes) apart
    void **arr = (void **) slab->first_free;
    const size_t max_n = available_size / slab->ent_size - 1; // index of the last object
    const size_t fact = slab->ent_size / sizeof(void *);

    for (size_t i = 0; i < max_n; i++)
    {
        arr[i * fact] = &arr[(i + 1) * fact]; // each object points at the next
    }
    arr[max_n * fact] = NULL; // last object ends the free list
}
111
112static void slab_init(void)
113{
114 pr_dinfo2(slab, "initializing the slab allocator");
115
116 slab_init_one(slab: &slab_slab, name: "slab_t", size: sizeof(slab_t));
117 slab_allocate_mem(slab: &slab_slab);
118
119 for (size_t i = 0; i < MOS_ARRAY_SIZE(BUILTIN_SLAB_SIZES); i++)
120 {
121 slab_init_one(slab: &slabs[i], name: BUILTIN_SLAB_SIZES[i].name, size: BUILTIN_SLAB_SIZES[i].size);
122 slab_allocate_mem(slab: &slabs[i]);
123 }
124}
125
126MOS_INIT(POST_MM, slab_init);
127
128static void kmemcache_free(slab_t *slab, const void *addr);
129
130void *slab_alloc(size_t size)
131{
132 slab_t *const slab = slab_for(size);
133 if (likely(slab))
134 return kmemcache_alloc(slab);
135
136 const size_t page_count = ALIGN_UP_TO_PAGE(size) / MOS_PAGE_SIZE;
137 const ptr_t ret = slab_impl_new_page(n: page_count + 1);
138 if (!ret)
139 return NULL;
140
141 slab_metadata_t *metadata = (slab_metadata_t *) ret;
142 metadata->pages = page_count;
143 metadata->size = size;
144
145 return (void *) ((ptr_t) ret + MOS_PAGE_SIZE);
146}
147
/**
 * Allocate zero-initialised memory for an array of @nmemb elements of
 * @size bytes each.
 *
 * Returns NULL on failure, including when nmemb * size would overflow
 * size_t (previously the product wrapped silently, allocating a block far
 * smaller than requested — a classic heap-overflow primitive).
 */
void *slab_calloc(size_t nmemb, size_t size)
{
    // reject multiplication overflow before computing the product (CERT INT30-C)
    if (size != 0 && nmemb > (size_t) -1 / size)
        return NULL;

    const size_t total = nmemb * size;
    void *ptr = slab_alloc(total);
    if (!ptr)
        return NULL;

    memset(ptr, 0, total);
    return ptr;
}
157
// Resize an allocation made by slab_alloc(). NULL @oldptr degenerates to
// slab_alloc(new_size). Large (page-aligned) allocations are recognised by
// alignment — slab objects always live past the in-page header and can never
// be page-aligned — and are grown/shrunk in place when the rounded page count
// is unchanged. Otherwise a new block is allocated, data copied, old freed.
void *slab_realloc(void *oldptr, size_t new_size)
{
    if (!oldptr)
        return slab_alloc(size: new_size);

    const ptr_t addr = (ptr_t) oldptr;
    if (is_aligned(addr, MOS_PAGE_SIZE))
    {
        // large allocation: metadata lives in the page just before the data
        slab_metadata_t *metadata = (slab_metadata_t *) (addr - MOS_PAGE_SIZE);
        if (ALIGN_UP_TO_PAGE(metadata->size) == ALIGN_UP_TO_PAGE(new_size))
        {
            // same number of pages: just record the new logical size
            metadata->size = new_size;
            return oldptr;
        }

        void *new_addr = slab_alloc(size: new_size);
        if (!new_addr)
            return NULL; // old block left intact on failure

        // copy only the bytes that survive the resize
        memcpy(dest: new_addr, src: oldptr, MIN(metadata->size, new_size));

        slab_free(addr: oldptr);
        return new_addr;
    }

    // slab object: the page-start header identifies the owning slab
    slab_header_t *slab_header = (slab_header_t *) ALIGN_DOWN_TO_PAGE(addr);
    slab_t *slab = slab_header->slab;

    if (new_size > slab->ent_size)
    {
        // outgrown this size class: move to a bigger slab (or the page path)
        void *new_addr = slab_alloc(size: new_size);
        if (!new_addr)
            return NULL;

        memcpy(dest: new_addr, src: oldptr, n: slab->ent_size);
        kmemcache_free(slab, addr: oldptr);
        return new_addr;
    }

    // still fits in the current object: nothing to do
    return oldptr;
}
199
200void slab_free(const void *ptr)
201{
202 if (!ptr)
203 return;
204
205 const ptr_t addr = (ptr_t) ptr;
206 if (is_aligned(addr, MOS_PAGE_SIZE))
207 {
208 slab_metadata_t *metadata = (slab_metadata_t *) (addr - MOS_PAGE_SIZE);
209 slab_impl_free_page(page: (ptr_t) metadata, n: metadata->pages + 1);
210 return;
211 }
212
213 const slab_header_t *header = (slab_header_t *) ALIGN_DOWN_TO_PAGE(addr);
214 kmemcache_free(slab: header->slab, addr: ptr);
215}
216
217// ======================
218
219slab_t *kmemcache_create(const char *name, size_t ent_size)
220{
221 slab_t *slab = kmemcache_alloc(slab: &slab_slab);
222 slab_init_one(slab, name, size: ent_size);
223 slab_allocate_mem(slab);
224 return slab;
225}
226
227void *kmemcache_alloc(slab_t *slab)
228{
229 pr_dinfo2(slab, "allocating from slab '%s'", slab->name);
230 spinlock_acquire(&slab->lock);
231
232 if (slab->first_free == 0)
233 {
234 // renew a slab
235 slab_allocate_mem(slab);
236 }
237
238 ptr_t *alloc = (ptr_t *) slab->first_free;
239 pr_dcont(slab, " -> %p", (void *) alloc);
240
241 // sanitize the memory
242 MOS_ASSERT_X((ptr_t) alloc >= MOS_KERNEL_START_VADDR, "slab: invalid memory address %p", (void *) alloc);
243
244 slab->first_free = *alloc; // next free entry
245 memset(s: alloc, c: 0, n: slab->ent_size);
246
247 slab->nobjs++;
248 spinlock_release(&slab->lock);
249 return alloc;
250}
251
252static void kmemcache_free(slab_t *slab, const void *addr)
253{
254 pr_dinfo2(slab, "freeing from slab '%s'", slab->name);
255 if (!addr)
256 return;
257
258 spinlock_acquire(&slab->lock);
259
260 ptr_t *new_head = (ptr_t *) addr;
261 *new_head = slab->first_free;
262 slab->first_free = (ptr_t) new_head;
263 slab->nobjs--;
264
265 spinlock_release(&slab->lock);
266}
267
268// ! sysfs support
269
270static bool slab_sysfs_slabinfo(sysfs_file_t *f)
271{
272 list_foreach(slab_t, slab, slabs_list)
273 {
274 sysfs_printf(file: f, fmt: "%15s, ent_size=%5zu, first_free=" PTR_FMT ", %5zu objects\n", slab->name, slab->ent_size, slab->first_free, slab->nobjs);
275 }
276
277 return true;
278}
279
// Register the slabinfo file at the sysfs root (runs at the SYSFS init stage).
MOS_INIT(SYSFS, slab_sysfs_init)
{
    // static: the sysfs item must outlive this init function
    static sysfs_item_t slabinfo = SYSFS_RO_ITEM("slabinfo", slab_sysfs_slabinfo);
    sysfs_register_root_file(item: &slabinfo);
}
285