// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-License-Identifier: BSD-2-Clause

#include "mos/mm/slab.hpp"

#include "mos/assert.hpp"
#include "mos/filesystem/sysfs/sysfs.hpp"
#include "mos/misc/setup.hpp"
#include "mos/mm/mm.hpp"
#include "mos/syslog/printk.hpp"

#include <algorithm>
#include <mos/allocator.hpp>
#include <mos/lib/structures/list.hpp>
#include <mos/lib/sync/spinlock.hpp>
#include <mos/mos_global.h>
#include <mos_stdlib.hpp>
#include <mos_string.hpp>

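// Each page owned by a slab cache begins with a slab_header_t; slab_free() and
// slab_realloc() recover it with ALIGN_DOWN_TO_PAGE() to find the owning slab_t.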
struct slab_header_t
{
    slab_t *slab;
};

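// For allocations too large for any builtin cache, slab_alloc() reserves one extra
// leading page holding this metadata and returns the page right after it; the returned
// pointer is therefore page-aligned, which is how slab_free() and slab_realloc() tell
// the two kinds of allocation apart.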
struct slab_metadata_t
{
    size_t pages;
    size_t size;
};

// Larger slab sizes are not required; such allocations are served directly with whole pages.
static const struct
{
    size_t size;
    const char *name;
} BUILTIN_SLAB_SIZES[] = {
    { .size = 4, .name = "builtin-4" },     { .size = 8, .name = "builtin-8" },     { .size = 16, .name = "builtin-16" },   { .size = 24, .name = "builtin-24" },   //
    { .size = 32, .name = "builtin-32" },   { .size = 48, .name = "builtin-48" },   { .size = 64, .name = "builtin-64" },   { .size = 96, .name = "builtin-96" },   //
    { .size = 128, .name = "builtin-128" }, { .size = 256, .name = "builtin-256" }, { .size = 384, .name = "builtin-384" }, { .size = 512, .name = "builtin-512" }, //
    { .size = 1024, .name = "builtin-1024" },
};

static slab_t slabs[MOS_ARRAY_SIZE(BUILTIN_SLAB_SIZES)];
static list_head slabs_list;

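// Find the smallest builtin cache whose entry size can hold `size` bytes; the
// BUILTIN_SLAB_SIZES table is sorted ascending, so the first fit is the tightest fit.
// Returns NULL if the request is larger than the biggest builtin entry.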
static inline slab_t *slab_for(size_t size)
{
    for (size_t i = 0; i < MOS_ARRAY_SIZE(slabs); i++)
    {
        slab_t *slab = &slabs[i];
        if (slab->ent_size >= size)
            return slab;
    }
    return NULL;
}

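// Page-level backend: take `n` free pages from the physical allocator, account them
// under MEM_SLAB, and hand back the kernel virtual address of the first frame.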
static ptr_t slab_impl_new_page(size_t n)
{
    phyframe_t *pages = mm_get_free_pages(n);
    mmstat_inc(MEM_SLAB, n);
    return phyframe_va(pages);
}

static void slab_impl_free_page(ptr_t page, size_t n)
{
    mmstat_dec(MEM_SLAB, n);
    mm_free_pages(va_phyframe(page), n);
}

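// Carve a fresh page into a free list for `slab`: a slab_header_t sits at the start of
// the page, followed (after rounding the header up to a multiple of the entry size) by
// the free entries; each free entry stores the address of the next free entry in its
// first word, and the list is terminated by NULL.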
static void slab_allocate_mem(slab_t *slab)
{
    pr_dinfo2(slab, "renew slab for '%.*s' with %zu bytes", (int) slab->name.size(), slab->name.data(), slab->ent_size);
    slab->first_free = slab_impl_new_page(1);
    if (unlikely(!slab->first_free))
    {
        mos_panic("slab: failed to allocate memory for slab");
        return;
    }

    const size_t header_offset = ALIGN_UP(sizeof(slab_header_t), slab->ent_size);
    const size_t available_size = MOS_PAGE_SIZE - header_offset;

    slab_header_t *const slab_ptr = (slab_header_t *) slab->first_free;
    slab_ptr->slab = slab;
    pr_dinfo2(slab, "slab header is at %p", (void *) slab_ptr);
    slab->first_free = (ptr_t) slab->first_free + header_offset;

    void **arr = (void **) slab->first_free;
    const size_t max_n = available_size / slab->ent_size - 1;
    const size_t fact = slab->ent_size / sizeof(void *);

    for (size_t i = 0; i < max_n; i++)
    {
        arr[i * fact] = &arr[(i + 1) * fact];
    }
    arr[max_n * fact] = NULL;
}

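// Reset a slab descriptor and link it into the global slabs_list so that it shows up in
// the slabinfo sysfs listing below.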
static void slab_init_one(slab_t *slab, const char *name, size_t size)
{
    MOS_ASSERT_X(size < MOS_PAGE_SIZE, "current slab implementation does not support slabs larger than a page, %zu bytes requested", size);
    linked_list_init(list_node(slab));
    list_node_append(&slabs_list, list_node(slab));
    slab->lock = SPINLOCK_INIT;
    slab->first_free = 0;
    slab->nobjs = 0;
    slab->name = name;
    slab->type_name = "<unsure>";
    slab->ent_size = size;
}

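// Set up every builtin cache and pre-populate each with its first page, so early
// allocations never start from an empty free list.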
void slab_init(void)
{
    pr_dinfo2(slab, "initializing the slab allocator");
    for (size_t i = 0; i < MOS_ARRAY_SIZE(BUILTIN_SLAB_SIZES); i++)
    {
        slab_init_one(&slabs[i], BUILTIN_SLAB_SIZES[i].name, BUILTIN_SLAB_SIZES[i].size);
        slab_allocate_mem(&slabs[i]);
    }
}

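// Register an externally-defined cache (presumably one created for a specific kernel
// type via the helpers in <mos/allocator.hpp>) so that it is also listed in slabinfo.
// The caller is expected to have filled in ent_size, name and lock already.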
void slab_register(slab_t *slab)
{
    pr_info2("slab: registering slab for '%s' with %zu bytes", slab->name.data(), slab->ent_size);
    linked_list_init(list_node(slab));
    list_node_append(&slabs_list, list_node(slab));
}

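// General-purpose allocation entry point: requests that fit a builtin cache are served
// from it; anything larger falls back to whole pages, with a slab_metadata_t stored in
// an extra leading page so the size can be recovered on free/realloc.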
void *slab_alloc(size_t size)
{
    slab_t *const slab = slab_for(size);
    if (likely(slab))
        return kmemcache_alloc(slab);

    const size_t page_count = ALIGN_UP_TO_PAGE(size) / MOS_PAGE_SIZE;
    const ptr_t ret = slab_impl_new_page(page_count + 1);
    if (!ret)
        return NULL;

    slab_metadata_t *metadata = (slab_metadata_t *) ret;
    metadata->pages = page_count;
    metadata->size = size;

    return (void *) ((ptr_t) ret + MOS_PAGE_SIZE);
}

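// Zeroing wrapper around slab_alloc(). Note that the multiplication is not checked for
// overflow here; callers are expected to pass sane values.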
void *slab_calloc(size_t nmemb, size_t size)
{
    void *ptr = slab_alloc(nmemb * size);
    if (!ptr)
        return NULL;

    memset(ptr, 0, nmemb * size);
    return ptr;
}

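// Grow or shrink an allocation. Page-aligned pointers came from the large-allocation
// path and carry their slab_metadata_t one page below; other pointers belong to a cache
// identified by the slab_header_t at the start of their page. Data is only copied (and
// the old block freed) when the new size no longer fits the existing block.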
void *slab_realloc(void *oldptr, size_t new_size)
{
    if (!oldptr)
        return slab_alloc(new_size);

    const ptr_t addr = (ptr_t) oldptr;
    if (is_aligned(addr, MOS_PAGE_SIZE))
    {
        slab_metadata_t *metadata = (slab_metadata_t *) (addr - MOS_PAGE_SIZE);
        if (ALIGN_UP_TO_PAGE(metadata->size) == ALIGN_UP_TO_PAGE(new_size))
        {
            metadata->size = new_size;
            return oldptr;
        }

        void *new_addr = slab_alloc(new_size);
        if (!new_addr)
            return NULL;

        memcpy(new_addr, oldptr, std::min(metadata->size, new_size));

        slab_free(oldptr);
        return new_addr;
    }

    slab_header_t *slab_header = (slab_header_t *) ALIGN_DOWN_TO_PAGE(addr);
    slab_t *slab = slab_header->slab;

    if (new_size > slab->ent_size)
    {
        void *new_addr = slab_alloc(new_size);
        if (!new_addr)
            return NULL;

        memcpy(new_addr, oldptr, slab->ent_size);
        kmemcache_free(slab, oldptr);
        return new_addr;
    }

    return oldptr;
}

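// Free either kind of allocation: page-aligned pointers release their pages (including
// the hidden metadata page), everything else is returned to its owning cache.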
void slab_free(const void *ptr)
{
    pr_dinfo2(slab, "freeing memory at %p", ptr);
    if (!ptr)
        return;

    const ptr_t addr = (ptr_t) ptr;
    if (is_aligned(addr, MOS_PAGE_SIZE))
    {
        slab_metadata_t *metadata = (slab_metadata_t *) (addr - MOS_PAGE_SIZE);
        slab_impl_free_page((ptr_t) metadata, metadata->pages + 1);
        return;
    }

    const slab_header_t *header = (slab_header_t *) ALIGN_DOWN_TO_PAGE(addr);
    kmemcache_free(header->slab, ptr);
}

// ======================

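// Pop the head of the cache's free list under its spinlock, refilling the list from a
// fresh page when it is empty. The returned object is zeroed before it is handed out.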
void *kmemcache_alloc(slab_t *slab)
{
    MOS_ASSERT_X(slab->ent_size > 0, "slab: invalid slab entry size %zu", slab->ent_size);
    pr_dinfo2(slab, "allocating from slab '%s'", slab->name.data());
    spinlock_acquire(&slab->lock);

    if (slab->first_free == 0)
    {
        // renew a slab
        slab_allocate_mem(slab);
    }

    ptr_t *alloc = (ptr_t *) slab->first_free;
    pr_dcont(slab, " -> %p", (void *) alloc);

    // sanitize the memory
    MOS_ASSERT_X((ptr_t) alloc >= MOS_KERNEL_START_VADDR, "slab: invalid memory address %p", (void *) alloc);

    slab->first_free = *alloc; // next free entry
    memset(alloc, 0, slab->ent_size);

    slab->nobjs++;
    spinlock_release(&slab->lock);
    return alloc;
}

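// Push the object back as the new head of the cache's free list by writing the old head
// into its first word. Note that pages are never returned to the page allocator here.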
void kmemcache_free(slab_t *slab, const void *addr)
{
    pr_dinfo2(slab, "freeing from slab '%s'", slab->name.data());
    if (!addr)
        return;

    spinlock_acquire(&slab->lock);

    ptr_t *new_head = (ptr_t *) addr;
    *new_head = slab->first_free;
    slab->first_free = (ptr_t) new_head;
    slab->nobjs--;

    spinlock_release(&slab->lock);
}

// ! sysfs support

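// Dump one line per registered cache (builtin and externally registered alike) into the
// read-only "slabinfo" sysfs root file defined below.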
static bool slab_sysfs_slabinfo(sysfs_file_t *f)
{
    sysfs_printf(f, "%20s \t%-10s %-18s \t%-8s %s\n\n", "", "Size", "First Free", "Objects", "Type Name");
    list_foreach(slab_t, slab, slabs_list)
    {
        sysfs_printf(f, "%20s:\t%-10zu " PTR_FMT " \t%-8zu %.*s\n", //
                     slab->name.data(),                             //
                     slab->ent_size,                                //
                     slab->first_free,                              //
                     slab->nobjs,                                   //
                     (int) slab->type_name.size(),                  //
                     slab->type_name.data()                         //
        );
    }

    return true;
}

MOS_INIT(SYSFS, slab_sysfs_init)
{
    static sysfs_item_t slabinfo = SYSFS_RO_ITEM("slabinfo", slab_sysfs_slabinfo);
    sysfs_register_root_file(&slabinfo);
}