// SPDX-License-Identifier: GPL-3.0-or-later

#include "mos/lib/sync/spinlock.h"
#include "mos/x86/devices/port.h"
#include "uacpi/kernel_api.h"
#include "uacpi/platform/arch_helpers.h"
#include "uacpi/status.h"

#include <bits/posix/posix_stdio.h>
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <fcntl.h>
#include <iostream>
#include <mutex>
#include <pthread.h>
#include <string.h>
#include <sys/mman.h>
#include <thread>
#include <unistd.h>
#include <vector>

extern fd_t mem_fd;
/*
 * Raw IO API; this is only used for accessing verified data from
 * "safe" code (i.e. not indirectly invoked by the AML interpreter),
 * e.g. programming FADT & FACS registers.
 *
 * NOTE:
 * 'byte_width' is ALWAYS one of 1, 2, 4, 8. You are NOT allowed to implement
 * this in terms of memcpy, as hardware expects accesses to be of the EXACT
 * width.
 * -------------------------------------------------------------------------
 */
uacpi_status uacpi_kernel_raw_memory_read(uacpi_phys_addr address, uacpi_u8 byte_width, uacpi_u64 *out_value)
{
    switch (byte_width)
    {
        case 1: *out_value = *(uacpi_u8 *) address; break;
        case 2: *out_value = *(uacpi_u16 *) address; break;
        case 4: *out_value = *(uacpi_u32 *) address; break;
        case 8: *out_value = *(uacpi_u64 *) address; break;
        default: return UACPI_STATUS_INVALID_ARGUMENT;
    }

    return UACPI_STATUS_OK;
}

uacpi_status uacpi_kernel_raw_memory_write(uacpi_phys_addr address, uacpi_u8 byte_width, uacpi_u64 in_value)
{
    switch (byte_width)
    {
        case 1: *(uacpi_u8 *) address = in_value; break;
        case 2: *(uacpi_u16 *) address = in_value; break;
        case 4: *(uacpi_u32 *) address = in_value; break;
        case 8: *(uacpi_u64 *) address = in_value; break;
        default: return UACPI_STATUS_INVALID_ARGUMENT;
    }

    return UACPI_STATUS_OK;
}

/*
 * NOTE:
 * 'byte_width' is ALWAYS one of 1, 2, 4. You are NOT allowed to break e.g. a
 * 4-byte access into four 1-byte accesses. Hardware ALWAYS expects accesses to
 * be of the exact width.
 */
uacpi_status uacpi_kernel_raw_io_read(uacpi_io_addr port, uacpi_u8 width, uacpi_u64 *data)
{
    switch (width)
    {
        case 1: *data = port_inb(port); break;
        case 2: *data = port_inw(port); break;
        case 4: *data = port_inl(port); break;
        default: return UACPI_STATUS_INVALID_ARGUMENT;
    }

    return UACPI_STATUS_OK;
}

uacpi_status uacpi_kernel_raw_io_write(uacpi_io_addr port, uacpi_u8 width, uacpi_u64 data)
{
    switch (width)
    {
        case 1: port_outb(port, data); break;
        case 2: port_outw(port, data); break;
        case 4: port_outl(port, data); break;
        default: return UACPI_STATUS_INVALID_ARGUMENT;
    }

    return UACPI_STATUS_OK;
}

// -------------------------------------------------------------------------

/*
 * NOTE:
 * 'byte_width' is ALWAYS one of 1, 2, 4. Since PCI registers are 32 bits wide
 * this must be able to handle e.g. a 1-byte access by reading at the nearest
 * 4-byte aligned offset below, then masking the value to select the target
 * byte.
 */
uacpi_status uacpi_kernel_pci_read(uacpi_pci_address *, uacpi_size, uacpi_u8, uacpi_u64 *value)
{
    std::cout << "uacpi_kernel_pci_read" << std::endl;
    *value = 0; // stub: report a zeroed register rather than leaving *value uninitialized
    return UACPI_STATUS_OK;
}

uacpi_status uacpi_kernel_pci_write(uacpi_pci_address *, uacpi_size, uacpi_u8, uacpi_u64)
{
    std::cout << "uacpi_kernel_pci_write" << std::endl;
    return UACPI_STATUS_OK;
}
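
/*
 * A minimal sketch of the masking approach described above, assuming the
 * legacy port 0xCF8/0xCFC configuration mechanism #1 (segment 0 only) and
 * that this process may touch those ports. MOS may expose PCI configuration
 * space differently, so this is illustrative, not what the stubs above are
 * required to do.
 */
[[maybe_unused]] static uacpi_status pci_read_sketch(uacpi_pci_address *address, uacpi_size offset, uacpi_u8 byte_width, uacpi_u64 *value)
{
    // Select the 4-byte aligned register containing 'offset'.
    const uacpi_u32 aligned_offset = offset & ~0x3u;
    const uacpi_u32 config_address = 0x80000000u | (address->bus << 16) | (address->device << 11) | (address->function << 8) | aligned_offset;
    port_outl(0xCF8, config_address);
    const uacpi_u32 dword = port_inl(0xCFC);

    // Shift and mask to extract the target bytes from the 32-bit register.
    const uacpi_u32 shift = (offset & 0x3u) * 8;
    switch (byte_width)
    {
        case 1: *value = (dword >> shift) & 0xFF; break;
        case 2: *value = (dword >> shift) & 0xFFFF; break;
        case 4: *value = dword; break;
        default: return UACPI_STATUS_INVALID_ARGUMENT;
    }
    return UACPI_STATUS_OK;
}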

/*
 * Map a SystemIO address at [base, base + len) and return a kernel-implemented
 * handle that can be used for reading and writing the IO range.
 */
uacpi_status uacpi_kernel_io_map(uacpi_io_addr base, uacpi_size, uacpi_handle *out_handle)
{
    // x86 port IO needs no mapping; hand back the port base itself as the handle.
    *out_handle = reinterpret_cast<uacpi_handle>(base);
    return UACPI_STATUS_OK;
}

void uacpi_kernel_io_unmap(uacpi_handle)
{
}

/*
 * Read/Write the IO range mapped via uacpi_kernel_io_map
 * at a 0-based 'offset' within the range.
 *
 * NOTE:
 * 'byte_width' is ALWAYS one of 1, 2, 4. You are NOT allowed to break e.g. a
 * 4-byte access into four 1-byte accesses. Hardware ALWAYS expects accesses to
 * be of the exact width.
 */
uacpi_status uacpi_kernel_io_read(uacpi_handle handle, uacpi_size offset, uacpi_u8 byte_width, uacpi_u64 *value)
{
    auto addr = reinterpret_cast<uacpi_io_addr>(handle);
    return uacpi_kernel_raw_io_read(addr + offset, byte_width, value);
}

uacpi_status uacpi_kernel_io_write(uacpi_handle handle, uacpi_size offset, uacpi_u8 byte_width, uacpi_u64 value)
{
    auto addr = reinterpret_cast<uacpi_io_addr>(handle);
    return uacpi_kernel_raw_io_write(addr + offset, byte_width, value);
}

void *uacpi_kernel_map(uacpi_phys_addr paddr, uacpi_size size)
{
    const off_t page_offset = paddr % MOS_PAGE_SIZE;
    // Account for the in-page offset, or a mapping that straddles a page
    // boundary would come up one page short.
    const auto npages = ALIGN_UP_TO_PAGE(size + page_offset) / MOS_PAGE_SIZE;

    if (paddr == MOS_FOURCC('R', 'S', 'D', 'P'))
    {
        fd_t rsdp_fd = open("/sys/acpi/RSDP", O_RDONLY);
        if (rsdp_fd < 0)
            return nullptr;

        void *ptr = mmap(nullptr, npages * MOS_PAGE_SIZE, PROT_READ, MAP_SHARED, rsdp_fd, 0);
        close(rsdp_fd);
        return ptr != MAP_FAILED ? ptr : nullptr;
    }

    paddr = ALIGN_DOWN_TO_PAGE(paddr);
    void *ptr = mmap(nullptr, npages * MOS_PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, mem_fd, paddr);
    if (ptr == MAP_FAILED)
        return nullptr;

    return static_cast<uacpi_u8 *>(ptr) + page_offset;
}
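
/*
 * Example (assuming 0x1000-byte pages): mapping paddr = 0x1234 with
 * size = 0x2000 gives page_offset = 0x234; the mapping must then cover
 * ALIGN_UP_TO_PAGE(0x2000 + 0x234) / MOS_PAGE_SIZE = 3 pages, and the
 * returned pointer is 0x234 bytes into the first page.
 */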

void uacpi_kernel_unmap(void *ptr, uacpi_size size)
{
    // uacpi_kernel_map may return a pointer offset into the first page;
    // align it back down (and widen the length) before unmapping.
    const auto vaddr = reinterpret_cast<uintptr_t>(ptr);
    const auto base = ALIGN_DOWN_TO_PAGE(vaddr);
    munmap(reinterpret_cast<void *>(base), size + (vaddr - base));
}

/*
 * Allocate a block of memory of 'size' bytes.
 * The contents of the allocated memory are unspecified.
 */
void *uacpi_kernel_alloc(uacpi_size size)
{
    return malloc(size);
}

/*
 * Allocate a block of memory of 'count' * 'size' bytes.
 * The returned memory block is expected to be zero-filled.
 */
void *uacpi_kernel_calloc(uacpi_size count, uacpi_size size)
{
    return calloc(count, size);
}

/*
 * Free a previously allocated memory block.
 *
 * 'mem' might be a NULL pointer. In this case, the call is assumed to be a
 * no-op.
 */
void uacpi_kernel_free(void *mem)
{
    free(mem);
}

void uacpi_kernel_log(uacpi_log_level, const uacpi_char *buf)
{
    write(fileno(stdout), buf, strlen(buf));
}

/*
 * Returns the number of 100 nanosecond ticks elapsed since boot,
 * strictly monotonic.
 */
uacpi_u64 uacpi_kernel_get_ticks(void)
{
    return std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now().time_since_epoch()).count() / 100;
}

/*
 * Spin for N microseconds.
 */
void uacpi_kernel_stall(uacpi_u8 usec)
{
    usleep(usec);
}
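
/*
 * Note: usleep() sleeps rather than spins, which is a looser guarantee than
 * the comment above asks for. A tick-based busy-wait over
 * uacpi_kernel_get_ticks() (100 ns units) would look like this sketch:
 *
 *   const uacpi_u64 deadline = uacpi_kernel_get_ticks() + usec * 10ull;
 *   while (uacpi_kernel_get_ticks() < deadline)
 *       ;
 */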

/*
 * Sleep for N milliseconds.
 */
void uacpi_kernel_sleep(uacpi_u64 msec)
{
    usleep(msec * 1000);
}

/*
 * Create/free an opaque non-recursive kernel mutex object.
 */
uacpi_handle uacpi_kernel_create_mutex(void)
{
    std::mutex *mutex = new std::mutex;
    return reinterpret_cast<uacpi_handle>(mutex);
}

void uacpi_kernel_free_mutex(uacpi_handle handle)
{
    delete reinterpret_cast<std::mutex *>(handle);
}

/*
 * Create/free an opaque kernel (semaphore-like) event object.
 */

uacpi_handle uacpi_kernel_create_event()
{
    return new char;
}

void uacpi_kernel_free_event(uacpi_handle handle)
{
    delete reinterpret_cast<char *>(handle);
}

/*
 * Returns a unique identifier of the currently executing thread.
 *
 * The returned thread id cannot be UACPI_THREAD_ID_NONE.
 */
uacpi_thread_id uacpi_kernel_get_thread_id(void)
{
    return pthread_self();
}

/*
 * Try to acquire the mutex with a millisecond timeout.
 * A timeout value of 0xFFFF implies infinite wait.
 */
uacpi_bool uacpi_kernel_acquire_mutex(uacpi_handle handle, uacpi_u16 timeout)
{
    auto mutex = reinterpret_cast<std::mutex *>(handle);
    if (timeout == 0xFFFF)
    {
        mutex->lock();
        return UACPI_TRUE;
    }

    const auto deadline = std::chrono::steady_clock::now() + std::chrono::milliseconds(timeout);
    while (!mutex->try_lock())
    {
        if (std::chrono::steady_clock::now() > deadline)
            return UACPI_FALSE;
    }

    return UACPI_TRUE;
}
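
/*
 * Note: a std::timed_mutex would express the timeout directly and avoid the
 * polling loop above, e.g. (sketch; it would also require changing the type
 * created in uacpi_kernel_create_mutex):
 *
 *   auto mutex = reinterpret_cast<std::timed_mutex *>(handle);
 *   return mutex->try_lock_for(std::chrono::milliseconds(timeout)) ? UACPI_TRUE : UACPI_FALSE;
 */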

void uacpi_kernel_release_mutex(uacpi_handle handle)
{
    reinterpret_cast<std::mutex *>(handle)->unlock();
}

/*
 * Try to wait for an event (counter > 0) with a millisecond timeout.
 * A timeout value of 0xFFFF implies infinite wait.
 *
 * The internal counter is decremented by 1 if the wait was successful.
 *
 * A successful wait is indicated by returning UACPI_TRUE.
 */
uacpi_bool uacpi_kernel_wait_for_event(uacpi_handle, uacpi_u16)
{
    return UACPI_FALSE;
}

/*
 * Signal the event object by incrementing its internal counter by 1.
 *
 * This function may be used in interrupt contexts.
 */
void uacpi_kernel_signal_event(uacpi_handle)
{
}

/*
 * Reset the event counter to 0.
 */
void uacpi_kernel_reset_event(uacpi_handle)
{
}

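/*
 * A minimal sketch of a counting event that could back the stubs above, built
 * on a condition variable. The event handles would then point at kernel_event
 * instead of a bare char; kernel_event and event_wait_sketch are hypothetical
 * names, illustrative only.
 */
struct kernel_event
{
    std::mutex lock;
    std::condition_variable cv;
    uacpi_u64 counter = 0;
};

[[maybe_unused]] static uacpi_bool event_wait_sketch(kernel_event *event, uacpi_u16 timeout)
{
    std::unique_lock guard(event->lock);
    const auto signalled = [event] { return event->counter > 0; };
    if (timeout == 0xFFFF)
        event->cv.wait(guard, signalled);
    else if (!event->cv.wait_for(guard, std::chrono::milliseconds(timeout), signalled))
        return UACPI_FALSE;
    event->counter--; // a successful wait consumes one signal
    return UACPI_TRUE;
}
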
/*
 * Handle a firmware request.
 *
 * Currently either the Breakpoint or Fatal operator.
 */
uacpi_status uacpi_kernel_handle_firmware_request(uacpi_firmware_request *)
{
    return UACPI_STATUS_OK;
}

/*
 * Install an interrupt handler at 'irq'; 'ctx' is passed to the provided
 * handler on every invocation.
 *
 * 'out_irq_handle' is set to a kernel-implemented value that can be used to
 * refer to this handler from other API.
 */
uacpi_status uacpi_kernel_install_interrupt_handler(uacpi_u32, uacpi_interrupt_handler, uacpi_handle, uacpi_handle *)
{
    return UACPI_STATUS_OK;
}

/*
 * Uninstall an interrupt handler. 'irq_handle' is the value returned via
 * 'out_irq_handle' during installation.
 */
uacpi_status uacpi_kernel_uninstall_interrupt_handler(uacpi_interrupt_handler, uacpi_handle)
{
    return UACPI_STATUS_OK;
}

/*
 * Create/free a kernel spinlock object.
 *
 * Unlike other types of locks, spinlocks may be used in interrupt contexts.
 */
uacpi_handle uacpi_kernel_create_spinlock(void)
{
    spinlock_t *lock = new spinlock_t;
    spinlock_init(lock);
    return reinterpret_cast<uacpi_handle>(lock);
}

void uacpi_kernel_free_spinlock(uacpi_handle handle)
{
    delete reinterpret_cast<spinlock_t *>(handle);
}

/*
 * Lock/unlock helpers for spinlocks.
 *
 * These are expected to disable interrupts, returning the previous state of
 * CPU flags so that interrupts can be re-enabled on unlock if they were
 * enabled before.
 *
 * Note that lock is infallible.
 */
uacpi_cpu_flags uacpi_kernel_spinlock_lock(uacpi_handle handle)
{
    // This runs in userspace, so interrupts cannot (and need not) be disabled
    // here; the returned flags are a placeholder.
    uacpi_cpu_flags flags = 0;
    spinlock_acquire(reinterpret_cast<spinlock_t *>(handle));
    return flags;
}

void uacpi_kernel_spinlock_unlock(uacpi_handle handle, uacpi_cpu_flags flags)
{
    MOS_UNUSED(flags);
    spinlock_release(reinterpret_cast<spinlock_t *>(handle));
}

/*
 * Schedules deferred work for execution.
 * Might be invoked from an interrupt context.
 */
std::vector<std::thread> work_threads;

uacpi_status uacpi_kernel_schedule_work(uacpi_work_type type, uacpi_work_handler handler, uacpi_handle ctx)
{
    MOS_UNUSED(type);

    std::thread t([handler, ctx] { handler(ctx); });
    work_threads.push_back(std::move(t));
    return UACPI_STATUS_OK;
}

/*
 * Blocks until all scheduled work is complete and the work queue becomes empty.
 */
uacpi_status uacpi_kernel_wait_for_work_completion(void)
{
    for (auto &t : work_threads)
        t.join();
    work_threads.clear(); // a joined std::thread cannot be joined again
    return UACPI_STATUS_OK;
}
432 | |