#pragma once

#include <uacpi/types.h>
#include <uacpi/platform/arch_helpers.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Raw IO API. This is only used for accessing verified data from
 * "safe" code (i.e. code not indirectly invoked by the AML interpreter),
 * e.g. programming FADT & FACS registers.
 *
 * NOTE:
 * 'byte_width' is ALWAYS one of 1, 2, 4, 8. You are NOT allowed to implement
 * this in terms of memcpy, as hardware expects accesses to be of the EXACT
 * width.
 * -------------------------------------------------------------------------
 */
uacpi_status uacpi_kernel_raw_memory_read(
    uacpi_phys_addr address, uacpi_u8 byte_width, uacpi_u64 *out_value
);
uacpi_status uacpi_kernel_raw_memory_write(
    uacpi_phys_addr address, uacpi_u8 byte_width, uacpi_u64 in_value
);

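/*
 * A minimal sketch of the read side (not part of the API contract), assuming
 * the physical range can be mapped on demand via uacpi_kernel_map() declared
 * below. Note the exact-width volatile accesses required above:
 *
 *     uacpi_status uacpi_kernel_raw_memory_read(
 *         uacpi_phys_addr address, uacpi_u8 byte_width, uacpi_u64 *out_value
 *     )
 *     {
 *         void *ptr = uacpi_kernel_map(address, byte_width);
 *         if (ptr == UACPI_NULL)
 *             return UACPI_STATUS_MAPPING_FAILED;
 *
 *         switch (byte_width) {
 *         case 1: *out_value = *(volatile uacpi_u8*)ptr;  break;
 *         case 2: *out_value = *(volatile uacpi_u16*)ptr; break;
 *         case 4: *out_value = *(volatile uacpi_u32*)ptr; break;
 *         case 8: *out_value = *(volatile uacpi_u64*)ptr; break;
 *         default:
 *             uacpi_kernel_unmap(ptr, byte_width);
 *             return UACPI_STATUS_INVALID_ARGUMENT;
 *         }
 *
 *         uacpi_kernel_unmap(ptr, byte_width);
 *         return UACPI_STATUS_OK;
 *     }
 */
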
/*
 * NOTE:
 * 'byte_width' is ALWAYS one of 1, 2, 4. You are NOT allowed to break e.g. a
 * 4-byte access into four 1-byte accesses. Hardware ALWAYS expects accesses to
 * be of the exact width.
 */
uacpi_status uacpi_kernel_raw_io_read(
    uacpi_io_addr address, uacpi_u8 byte_width, uacpi_u64 *out_value
);
uacpi_status uacpi_kernel_raw_io_write(
    uacpi_io_addr address, uacpi_u8 byte_width, uacpi_u64 in_value
);
// -------------------------------------------------------------------------

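/*
 * On x86, a sketch of the write side in terms of port IO, assuming a
 * GCC/Clang-style compiler with inline assembly support:
 *
 *     uacpi_status uacpi_kernel_raw_io_write(
 *         uacpi_io_addr address, uacpi_u8 byte_width, uacpi_u64 in_value
 *     )
 *     {
 *         uacpi_u16 port = address;
 *
 *         switch (byte_width) {
 *         case 1:
 *             __asm__ volatile("outb %0, %1"
 *                              :: "a"((uacpi_u8)in_value), "Nd"(port));
 *             break;
 *         case 2:
 *             __asm__ volatile("outw %0, %1"
 *                              :: "a"((uacpi_u16)in_value), "Nd"(port));
 *             break;
 *         case 4:
 *             __asm__ volatile("outl %0, %1"
 *                              :: "a"((uacpi_u32)in_value), "Nd"(port));
 *             break;
 *         default:
 *             return UACPI_STATUS_INVALID_ARGUMENT;
 *         }
 *
 *         return UACPI_STATUS_OK;
 *     }
 */
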
/*
 * NOTE:
 * 'byte_width' is ALWAYS one of 1, 2, 4. Since PCI registers are 32 bits wide
 * this must be able to handle e.g. a 1-byte access by reading at the nearest
 * 4-byte aligned offset below, then masking the value to select the target
 * byte.
 */
uacpi_status uacpi_kernel_pci_read(
    uacpi_pci_address *address, uacpi_size offset,
    uacpi_u8 byte_width, uacpi_u64 *value
);
uacpi_status uacpi_kernel_pci_write(
    uacpi_pci_address *address, uacpi_size offset,
    uacpi_u8 byte_width, uacpi_u64 value
);

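/*
 * A sketch of the read side using the legacy configuration mechanism #1
 * (ports 0xCF8/0xCFC). Assumes segment 0, that uacpi_pci_address exposes
 * bus/device/function fields, and hypothetical outl()/inl() port helpers
 * (e.g. built from inline assembly as in the previous sketch):
 *
 *     uacpi_status uacpi_kernel_pci_read(
 *         uacpi_pci_address *address, uacpi_size offset,
 *         uacpi_u8 byte_width, uacpi_u64 *value
 *     )
 *     {
 *         uacpi_u32 dword, shift;
 *
 *         // Select the nearest 4-byte aligned register at or below 'offset'
 *         outl(0xCF8, (1u << 31) |
 *                     ((uacpi_u32)address->bus << 16) |
 *                     ((uacpi_u32)address->device << 11) |
 *                     ((uacpi_u32)address->function << 8) |
 *                     ((uacpi_u32)offset & 0xFC));
 *         dword = inl(0xCFC);
 *
 *         // Mask out the target bytes from the 32-bit register
 *         shift = (offset & 3) * 8;
 *         switch (byte_width) {
 *         case 1: *value = (dword >> shift) & 0xFF;   break;
 *         case 2: *value = (dword >> shift) & 0xFFFF; break;
 *         case 4: *value = dword;                     break;
 *         default: return UACPI_STATUS_INVALID_ARGUMENT;
 *         }
 *
 *         return UACPI_STATUS_OK;
 *     }
 */
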
/*
 * Map a SystemIO address at [base, base + len) and return a kernel-implemented
 * handle that can be used for reading and writing the IO range.
 */
uacpi_status uacpi_kernel_io_map(
    uacpi_io_addr base, uacpi_size len, uacpi_handle *out_handle
);
void uacpi_kernel_io_unmap(uacpi_handle handle);

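/*
 * A minimal sketch for a port IO based platform, where the handle simply
 * records the range for later bounds checking (assumes UACPI_SIZED_FREES
 * is not enabled):
 *
 *     struct io_range {
 *         uacpi_io_addr base;
 *         uacpi_size len;
 *     };
 *
 *     uacpi_status uacpi_kernel_io_map(
 *         uacpi_io_addr base, uacpi_size len, uacpi_handle *out_handle
 *     )
 *     {
 *         struct io_range *range = uacpi_kernel_alloc(sizeof(*range));
 *         if (range == UACPI_NULL)
 *             return UACPI_STATUS_OUT_OF_MEMORY;
 *
 *         range->base = base;
 *         range->len = len;
 *         *out_handle = range;
 *         return UACPI_STATUS_OK;
 *     }
 *
 *     void uacpi_kernel_io_unmap(uacpi_handle handle)
 *     {
 *         uacpi_kernel_free(handle);
 *     }
 */
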
/*
 * Read/Write the IO range mapped via uacpi_kernel_io_map
 * at a 0-based 'offset' within the range.
 *
 * NOTE:
 * 'byte_width' is ALWAYS one of 1, 2, 4. You are NOT allowed to break e.g. a
 * 4-byte access into four 1-byte accesses. Hardware ALWAYS expects accesses to
 * be of the exact width.
 */
uacpi_status uacpi_kernel_io_read(
    uacpi_handle, uacpi_size offset,
    uacpi_u8 byte_width, uacpi_u64 *value
);
uacpi_status uacpi_kernel_io_write(
    uacpi_handle, uacpi_size offset,
    uacpi_u8 byte_width, uacpi_u64 value
);

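/*
 * A sketch building on the io_range handle from the previous sketch,
 * delegating to the raw IO API once the offset is validated:
 *
 *     uacpi_status uacpi_kernel_io_read(
 *         uacpi_handle handle, uacpi_size offset,
 *         uacpi_u8 byte_width, uacpi_u64 *value
 *     )
 *     {
 *         struct io_range *range = handle;
 *
 *         if (offset + byte_width > range->len)
 *             return UACPI_STATUS_INVALID_ARGUMENT;
 *
 *         return uacpi_kernel_raw_io_read(
 *             range->base + offset, byte_width, value
 *         );
 *     }
 */
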
void *uacpi_kernel_map(uacpi_phys_addr addr, uacpi_size len);
void uacpi_kernel_unmap(void *addr, uacpi_size len);

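/*
 * On 64-bit kernels that keep all of physical memory mapped at a fixed
 * offset (a higher-half direct map), a sketch can be as simple as:
 *
 *     // HHDM_OFFSET is a hypothetical kernel constant
 *     void *uacpi_kernel_map(uacpi_phys_addr addr, uacpi_size len)
 *     {
 *         (void)len;
 *         return (void*)(addr + HHDM_OFFSET);
 *     }
 *
 *     void uacpi_kernel_unmap(void *addr, uacpi_size len)
 *     {
 *         // Nothing to do, the direct map is permanent
 *         (void)addr;
 *         (void)len;
 *     }
 */
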
/*
 * Allocate a block of memory of 'size' bytes.
 * The contents of the allocated memory are unspecified.
 */
void *uacpi_kernel_alloc(uacpi_size size);

/*
 * Allocate a block of memory of 'count' * 'size' bytes.
 * The returned memory block is expected to be zero-filled.
 */
void *uacpi_kernel_calloc(uacpi_size count, uacpi_size size);

/*
 * Free a previously allocated memory block.
 *
 * 'mem' might be a NULL pointer. In this case, the call is assumed to be a
 * no-op.
 *
 * If 'UACPI_SIZED_FREES' is enabled, an additional 'size_hint' parameter
 * contains the size of the original allocation. Note that in some scenarios
 * this incurs additional cost to calculate the object size.
 */
#ifndef UACPI_SIZED_FREES
void uacpi_kernel_free(void *mem);
#else
void uacpi_kernel_free(void *mem, uacpi_size size_hint);
#endif

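/*
 * A sketch of the allocator hooks in terms of hypothetical kmalloc()/kfree()
 * kernel helpers (with UACPI_SIZED_FREES disabled). Note the overflow check
 * and the zero-fill requirement in calloc:
 *
 *     #include <string.h> // or your kernel's memset
 *
 *     void *uacpi_kernel_alloc(uacpi_size size)
 *     {
 *         return kmalloc(size);
 *     }
 *
 *     void *uacpi_kernel_calloc(uacpi_size count, uacpi_size size)
 *     {
 *         void *ptr;
 *
 *         // Reject multiplications that would wrap around
 *         if (size && count > (uacpi_size)-1 / size)
 *             return UACPI_NULL;
 *
 *         ptr = uacpi_kernel_alloc(count * size);
 *         if (ptr != UACPI_NULL)
 *             memset(ptr, 0, count * size);
 *         return ptr;
 *     }
 *
 *     void uacpi_kernel_free(void *mem)
 *     {
 *         if (mem != UACPI_NULL)
 *             kfree(mem);
 *     }
 */
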
typedef enum uacpi_log_level {
    /*
     * Super verbose logging: every op & uop being processed is logged.
     * Mostly useful for tracking down hangs/lockups.
     */
    UACPI_LOG_DEBUG = 4,

    /*
     * A little verbose: every operation region access is traced, with a bit
     * of extra information on top.
     */
    UACPI_LOG_TRACE = 3,

    /*
     * Only logs the bare minimum information about state changes and/or
     * initialization progress.
     */
    UACPI_LOG_INFO = 2,

    /*
     * Logs recoverable errors and/or non-important aborts.
     */
    UACPI_LOG_WARN = 1,

    /*
     * Logs only critical errors that might affect the ability to initialize
     * or prevent stable runtime.
     */
    UACPI_LOG_ERROR = 0,
} uacpi_log_level;

#ifndef UACPI_FORMATTED_LOGGING
void uacpi_kernel_log(uacpi_log_level, const uacpi_char*);
#else
UACPI_PRINTF_DECL(2, 3)
void uacpi_kernel_log(uacpi_log_level, const uacpi_char*, ...);
void uacpi_kernel_vlog(uacpi_log_level, const uacpi_char*, uacpi_va_list);
#endif

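/*
 * With UACPI_FORMATTED_LOGGING enabled, a sketch funneling both entry points
 * through one buffer. Assumes a libc-style vsnprintf() is available to the
 * kernel, that uacpi_va_list aliases the standard va_list, and uses a
 * hypothetical kernel_puts() sink:
 *
 *     #include <stdarg.h>
 *     #include <stdio.h>
 *
 *     void uacpi_kernel_vlog(
 *         uacpi_log_level level, const uacpi_char *fmt, uacpi_va_list args
 *     )
 *     {
 *         char buf[256];
 *
 *         vsnprintf(buf, sizeof(buf), fmt, args);
 *         kernel_puts(level, buf); // hypothetical
 *     }
 *
 *     void uacpi_kernel_log(uacpi_log_level level, const uacpi_char *fmt, ...)
 *     {
 *         va_list args;
 *
 *         va_start(args, fmt);
 *         uacpi_kernel_vlog(level, fmt, args);
 *         va_end(args);
 *     }
 */
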
/*
 * Returns the number of 100 nanosecond ticks elapsed since boot,
 * strictly monotonic.
 */
uacpi_u64 uacpi_kernel_get_ticks(void);

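/*
 * A minimal sketch, assuming a hypothetical monotonic nanosecond clock
 * (e.g. backed by the TSC or a platform timer):
 *
 *     uacpi_u64 uacpi_kernel_get_ticks(void)
 *     {
 *         // 1 tick == 100ns
 *         return monotonic_ns_since_boot() / 100;
 *     }
 */
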
/*
 * Spin for N microseconds.
 */
void uacpi_kernel_stall(uacpi_u8 usec);

/*
 * Sleep for N milliseconds.
 */
void uacpi_kernel_sleep(uacpi_u64 msec);

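/*
 * A busy-wait sketch for stall built on uacpi_kernel_get_ticks()
 * (1us == 10 ticks of 100ns). Sleep, by contrast, should yield to the
 * scheduler rather than spin:
 *
 *     void uacpi_kernel_stall(uacpi_u8 usec)
 *     {
 *         uacpi_u64 deadline = uacpi_kernel_get_ticks() + (uacpi_u64)usec * 10;
 *
 *         while (uacpi_kernel_get_ticks() < deadline)
 *             ;
 *     }
 */
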
/*
 * Create/free an opaque non-recursive kernel mutex object.
 */
uacpi_handle uacpi_kernel_create_mutex(void);
void uacpi_kernel_free_mutex(uacpi_handle);

/*
 * Create/free an opaque kernel (semaphore-like) event object.
 */
uacpi_handle uacpi_kernel_create_event(void);
void uacpi_kernel_free_event(uacpi_handle);

/*
 * Returns a unique identifier of the currently executing thread.
 *
 * The returned thread id cannot be UACPI_THREAD_ID_NONE.
 */
uacpi_thread_id uacpi_kernel_get_thread_id(void);

/*
 * Try to acquire the mutex with a millisecond timeout.
 * A timeout value of 0xFFFF implies infinite wait.
 */
uacpi_bool uacpi_kernel_acquire_mutex(uacpi_handle, uacpi_u16);
void uacpi_kernel_release_mutex(uacpi_handle);

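/*
 * A sketch in terms of hypothetical kernel mutex primitives, handling the
 * infinite-wait sentinel separately:
 *
 *     uacpi_bool uacpi_kernel_acquire_mutex(
 *         uacpi_handle handle, uacpi_u16 timeout_ms
 *     )
 *     {
 *         if (timeout_ms == 0xFFFF) {
 *             my_mutex_lock(handle); // hypothetical, blocks forever
 *             return UACPI_TRUE;
 *         }
 *
 *         // hypothetical, returns false if the timeout expired
 *         return my_mutex_lock_timeout(handle, timeout_ms) ?
 *                UACPI_TRUE : UACPI_FALSE;
 *     }
 */
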
/*
 * Try to wait for an event (counter > 0) with a millisecond timeout.
 * A timeout value of 0xFFFF implies infinite wait.
 *
 * The internal counter is decremented by 1 if the wait was successful.
 *
 * A successful wait is indicated by returning UACPI_TRUE.
 */
uacpi_bool uacpi_kernel_wait_for_event(uacpi_handle, uacpi_u16);

/*
 * Signal the event object by incrementing its internal counter by 1.
 *
 * This function may be used in interrupt contexts.
 */
void uacpi_kernel_signal_event(uacpi_handle);

/*
 * Reset the event counter to 0.
 */
void uacpi_kernel_reset_event(uacpi_handle);

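/*
 * A sketch of the event object built entirely on other hooks from this
 * header (a spinlock for IRQ-safety, sleep/ticks for the timeout). A real
 * kernel would likely use a native semaphore instead of polling:
 *
 *     struct event {
 *         uacpi_handle lock; // from uacpi_kernel_create_spinlock()
 *         uacpi_size counter;
 *     };
 *
 *     uacpi_bool uacpi_kernel_wait_for_event(
 *         uacpi_handle handle, uacpi_u16 timeout_ms
 *     )
 *     {
 *         struct event *event = handle;
 *         uacpi_u64 deadline;
 *
 *         // 1ms == 10000 ticks of 100ns
 *         deadline = uacpi_kernel_get_ticks() + (uacpi_u64)timeout_ms * 10000;
 *
 *         for (;;) {
 *             uacpi_cpu_flags flags = uacpi_kernel_spinlock_lock(event->lock);
 *
 *             if (event->counter) {
 *                 event->counter--;
 *                 uacpi_kernel_spinlock_unlock(event->lock, flags);
 *                 return UACPI_TRUE;
 *             }
 *             uacpi_kernel_spinlock_unlock(event->lock, flags);
 *
 *             if (timeout_ms != 0xFFFF &&
 *                 uacpi_kernel_get_ticks() >= deadline)
 *                 return UACPI_FALSE;
 *
 *             uacpi_kernel_sleep(1);
 *         }
 *     }
 *
 *     void uacpi_kernel_signal_event(uacpi_handle handle)
 *     {
 *         struct event *event = handle;
 *         uacpi_cpu_flags flags = uacpi_kernel_spinlock_lock(event->lock);
 *
 *         event->counter++;
 *         uacpi_kernel_spinlock_unlock(event->lock, flags);
 *     }
 */
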
/*
 * Handle a firmware request.
 *
 * Currently this is either the Breakpoint or the Fatal operator.
 */
uacpi_status uacpi_kernel_handle_firmware_request(uacpi_firmware_request*);

/*
 * Install an interrupt handler at 'irq'. 'ctx' is passed to the provided
 * handler on every invocation.
 *
 * 'out_irq_handle' is set to a kernel-implemented value that can be used to
 * refer to this handler from other APIs.
 */
uacpi_status uacpi_kernel_install_interrupt_handler(
    uacpi_u32 irq, uacpi_interrupt_handler, uacpi_handle ctx,
    uacpi_handle *out_irq_handle
);

/*
 * Uninstall an interrupt handler. 'irq_handle' is the value returned via
 * 'out_irq_handle' during installation.
 */
uacpi_status uacpi_kernel_uninstall_interrupt_handler(
    uacpi_interrupt_handler, uacpi_handle irq_handle
);

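/*
 * A sketch of the bookkeeping side, assuming a hypothetical platform hook
 * for the actual IRQ routing (the handler type itself is defined in
 * uacpi/types.h):
 *
 *     struct irq_binding {
 *         uacpi_interrupt_handler handler;
 *         uacpi_handle ctx;
 *         uacpi_u32 irq;
 *     };
 *
 *     static void trampoline(void *opaque)
 *     {
 *         struct irq_binding *binding = opaque;
 *         binding->handler(binding->ctx);
 *     }
 *
 *     uacpi_status uacpi_kernel_install_interrupt_handler(
 *         uacpi_u32 irq, uacpi_interrupt_handler handler, uacpi_handle ctx,
 *         uacpi_handle *out_irq_handle
 *     )
 *     {
 *         struct irq_binding *binding;
 *
 *         binding = uacpi_kernel_alloc(sizeof(*binding));
 *         if (binding == UACPI_NULL)
 *             return UACPI_STATUS_OUT_OF_MEMORY;
 *
 *         binding->handler = handler;
 *         binding->ctx = ctx;
 *         binding->irq = irq;
 *
 *         // hypothetical: route 'irq' to trampoline(binding)
 *         platform_register_irq(irq, trampoline, binding);
 *
 *         *out_irq_handle = binding;
 *         return UACPI_STATUS_OK;
 *     }
 */
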
/*
 * Create/free a kernel spinlock object.
 *
 * Unlike other types of locks, spinlocks may be used in interrupt contexts.
 */
uacpi_handle uacpi_kernel_create_spinlock(void);
void uacpi_kernel_free_spinlock(uacpi_handle);

/*
 * Lock/unlock helpers for spinlocks.
 *
 * These are expected to disable interrupts, returning the previous state of
 * CPU flags, which can be used to re-enable interrupts on unlock if they were
 * enabled before the lock was taken.
 *
 * Note that lock is infallible.
 */
uacpi_cpu_flags uacpi_kernel_spinlock_lock(uacpi_handle);
void uacpi_kernel_spinlock_unlock(uacpi_handle, uacpi_cpu_flags);

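/*
 * A sketch using hypothetical architecture helpers for the interrupt state
 * and a hypothetical raw spinlock:
 *
 *     uacpi_cpu_flags uacpi_kernel_spinlock_lock(uacpi_handle handle)
 *     {
 *         uacpi_cpu_flags flags;
 *
 *         flags = arch_irq_save(); // hypothetical: disable IRQs, return old state
 *         raw_spin_lock(handle);   // hypothetical
 *         return flags;
 *     }
 *
 *     void uacpi_kernel_spinlock_unlock(uacpi_handle handle, uacpi_cpu_flags flags)
 *     {
 *         raw_spin_unlock(handle); // hypothetical
 *         arch_irq_restore(flags); // hypothetical: restore the saved IRQ state
 *     }
 */
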
typedef enum uacpi_work_type {
    /*
     * Schedule a GPE handler method for execution.
     * This should be scheduled to run on CPU0 to avoid potential SMI-related
     * firmware bugs.
     */
    UACPI_WORK_GPE_EXECUTION,

    /*
     * Schedule a Notify(device) firmware request for execution.
     * This can run on any CPU.
     */
    UACPI_WORK_NOTIFICATION,
} uacpi_work_type;

typedef void (*uacpi_work_handler)(uacpi_handle);

/*
 * Schedules deferred work for execution.
 * Might be invoked from an interrupt context.
 */
uacpi_status uacpi_kernel_schedule_work(
    uacpi_work_type, uacpi_work_handler, uacpi_handle ctx
);

/*
 * Blocks until all scheduled work is complete and the work queue becomes
 * empty.
 */
uacpi_status uacpi_kernel_wait_for_work_completion(void);

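/*
 * A sketch of completion tracking with a counter of in-flight work items,
 * assuming a hypothetical workqueue that reports back after each handler
 * runs; the GPE queue is pinned to CPU0 as required above:
 *
 *     static uacpi_handle work_lock; // from uacpi_kernel_create_spinlock()
 *     static uacpi_size work_pending;
 *
 *     uacpi_status uacpi_kernel_schedule_work(
 *         uacpi_work_type type, uacpi_work_handler handler, uacpi_handle ctx
 *     )
 *     {
 *         uacpi_status ret;
 *         uacpi_cpu_flags flags = uacpi_kernel_spinlock_lock(work_lock);
 *
 *         work_pending++;
 *         uacpi_kernel_spinlock_unlock(work_lock, flags);
 *
 *         // hypothetical: enqueue on a CPU0-pinned or a generic queue
 *         ret = workqueue_enqueue(
 *             type == UACPI_WORK_GPE_EXECUTION ? cpu0_queue : any_queue,
 *             handler, ctx
 *         );
 *         if (ret != UACPI_STATUS_OK)
 *             work_done();
 *         return ret;
 *     }
 *
 *     // Called by the (hypothetical) workqueue after each handler returns
 *     static void work_done(void)
 *     {
 *         uacpi_cpu_flags flags = uacpi_kernel_spinlock_lock(work_lock);
 *
 *         work_pending--;
 *         uacpi_kernel_spinlock_unlock(work_lock, flags);
 *     }
 *
 *     uacpi_status uacpi_kernel_wait_for_work_completion(void)
 *     {
 *         for (;;) {
 *             uacpi_cpu_flags flags = uacpi_kernel_spinlock_lock(work_lock);
 *             uacpi_bool done = work_pending == 0;
 *
 *             uacpi_kernel_spinlock_unlock(work_lock, flags);
 *             if (done)
 *                 return UACPI_STATUS_OK;
 *             uacpi_kernel_sleep(1);
 *         }
 *     }
 */
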
#ifdef __cplusplus
}
#endif