#include <uacpi/internal/event.h>
#include <uacpi/internal/registers.h>
#include <uacpi/internal/context.h>
#include <uacpi/internal/io.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/namespace.h>
#include <uacpi/internal/interpreter.h>
#include <uacpi/internal/notify.h>
#include <uacpi/internal/utilities.h>
#include <uacpi/acpi.h>

#define UACPI_EVENT_DISABLED 0
#define UACPI_EVENT_ENABLED 1

#ifndef UACPI_REDUCED_HARDWARE

struct fixed_event {
    uacpi_u8 enable_field;
    uacpi_u8 status_field;
    uacpi_u16 enable_mask;
    uacpi_u16 status_mask;
};

struct fixed_event_handler {
    uacpi_interrupt_handler handler;
    uacpi_handle ctx;
};

static const struct fixed_event fixed_events[UACPI_FIXED_EVENT_MAX + 1] = {
    [UACPI_FIXED_EVENT_GLOBAL_LOCK] = {
        .status_field = UACPI_REGISTER_FIELD_GBL_STS,
        .enable_field = UACPI_REGISTER_FIELD_GBL_EN,
        .enable_mask = ACPI_PM1_EN_GBL_EN_MASK,
        .status_mask = ACPI_PM1_STS_GBL_STS_MASK,
    },
    [UACPI_FIXED_EVENT_TIMER_STATUS] = {
        .status_field = UACPI_REGISTER_FIELD_TMR_STS,
        .enable_field = UACPI_REGISTER_FIELD_TMR_EN,
        .enable_mask = ACPI_PM1_EN_TMR_EN_MASK,
        .status_mask = ACPI_PM1_STS_TMR_STS_MASK,
    },
    [UACPI_FIXED_EVENT_POWER_BUTTON] = {
        .status_field = UACPI_REGISTER_FIELD_PWRBTN_STS,
        .enable_field = UACPI_REGISTER_FIELD_PWRBTN_EN,
        .enable_mask = ACPI_PM1_EN_PWRBTN_EN_MASK,
        .status_mask = ACPI_PM1_STS_PWRBTN_STS_MASK,
    },
    [UACPI_FIXED_EVENT_SLEEP_BUTTON] = {
        .status_field = UACPI_REGISTER_FIELD_SLPBTN_STS,
        .enable_field = UACPI_REGISTER_FIELD_SLPBTN_EN,
        .enable_mask = ACPI_PM1_EN_SLPBTN_EN_MASK,
        .status_mask = ACPI_PM1_STS_SLPBTN_STS_MASK,
    },
    [UACPI_FIXED_EVENT_RTC] = {
        .status_field = UACPI_REGISTER_FIELD_RTC_STS,
        .enable_field = UACPI_REGISTER_FIELD_RTC_EN,
        .enable_mask = ACPI_PM1_EN_RTC_EN_MASK,
        .status_mask = ACPI_PM1_STS_RTC_STS_MASK,
    },
};

static struct fixed_event_handler
fixed_event_handlers[UACPI_FIXED_EVENT_MAX + 1];

static uacpi_status initialize_fixed_events(void)
{
    uacpi_size i;

    for (i = 0; i < UACPI_FIXED_EVENT_MAX; ++i) {
        uacpi_write_register_field(
            fixed_events[i].enable_field, UACPI_EVENT_DISABLED
        );
    }

    return UACPI_STATUS_OK;
}

static uacpi_status set_event(uacpi_u8 event, uacpi_u8 value)
{
    uacpi_status ret;
    uacpi_u64 raw_value;
    const struct fixed_event *ev = &fixed_events[event];

    ret = uacpi_write_register_field(ev->enable_field, value);
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_read_register_field(ev->enable_field, &raw_value);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (raw_value != value) {
        uacpi_error("failed to %sable fixed event %d\n",
                    value ? "en" : "dis", event);
        return UACPI_STATUS_HARDWARE_TIMEOUT;
    }

    uacpi_trace("fixed event %d %sabled successfully\n",
                event, value ? "en" : "dis");
    return UACPI_STATUS_OK;
}

uacpi_status uacpi_enable_fixed_event(uacpi_fixed_event event)
{
    if (uacpi_unlikely(event > UACPI_FIXED_EVENT_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;
    if (uacpi_is_hardware_reduced())
        return UACPI_STATUS_OK;

    /*
     * Attempting to enable an event that doesn't have a handler is most
     * likely an error, don't allow it.
     */
    if (uacpi_unlikely(fixed_event_handlers[event].handler == UACPI_NULL))
        return UACPI_STATUS_NO_HANDLER;

    return set_event(event, UACPI_EVENT_ENABLED);
}

uacpi_status uacpi_disable_fixed_event(uacpi_fixed_event event)
{
    if (uacpi_unlikely(event > UACPI_FIXED_EVENT_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;
    if (uacpi_is_hardware_reduced())
        return UACPI_STATUS_OK;

    return set_event(event, UACPI_EVENT_DISABLED);
}

uacpi_status uacpi_clear_fixed_event(uacpi_fixed_event event)
{
    if (uacpi_unlikely(event > UACPI_FIXED_EVENT_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;
    if (uacpi_is_hardware_reduced())
        return UACPI_STATUS_OK;

    return uacpi_write_register_field(
        fixed_events[event].status_field, ACPI_PM1_STS_CLEAR
    );
}
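
/*
 * Usage sketch (illustrative only, not part of this file): a client
 * installing a power button handler via the public fixed event API declared
 * in uacpi/event.h. uacpi_install_fixed_event_handler() already enables the
 * event, so uacpi_enable_fixed_event() is only needed after an explicit
 * disable.
 *
 *     static uacpi_interrupt_ret on_power_button(uacpi_handle ctx)
 *     {
 *         // react to the press, e.g. schedule a shutdown
 *         return UACPI_INTERRUPT_HANDLED;
 *     }
 *
 *     uacpi_install_fixed_event_handler(
 *         UACPI_FIXED_EVENT_POWER_BUTTON, on_power_button, UACPI_NULL
 *     );
 */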

static uacpi_interrupt_ret dispatch_fixed_event(
    const struct fixed_event *ev, uacpi_fixed_event event
)
{
    uacpi_status ret;
    struct fixed_event_handler *evh = &fixed_event_handlers[event];

    ret = uacpi_write_register_field(ev->status_field, ACPI_PM1_STS_CLEAR);
    if (uacpi_unlikely_error(ret))
        return UACPI_INTERRUPT_NOT_HANDLED;

    if (uacpi_unlikely(evh->handler == UACPI_NULL)) {
        uacpi_warn(
            "fixed event %d fired but no handler installed, disabling...\n",
            event
        );
        uacpi_write_register_field(ev->enable_field, UACPI_EVENT_DISABLED);
        return UACPI_INTERRUPT_NOT_HANDLED;
    }

    return evh->handler(evh->ctx);
}

static uacpi_interrupt_ret handle_fixed_events(void)
{
    uacpi_interrupt_ret int_ret = UACPI_INTERRUPT_NOT_HANDLED;
    uacpi_status ret;
    uacpi_u64 enable_mask, status_mask;
    uacpi_size i;

    ret = uacpi_read_register(UACPI_REGISTER_PM1_STS, &status_mask);
    if (uacpi_unlikely_error(ret))
        return int_ret;

    ret = uacpi_read_register(UACPI_REGISTER_PM1_EN, &enable_mask);
    if (uacpi_unlikely_error(ret))
        return int_ret;

    for (i = 0; i < UACPI_FIXED_EVENT_MAX; ++i)
    {
        const struct fixed_event *ev = &fixed_events[i];

        if (!(status_mask & ev->status_mask) ||
            !(enable_mask & ev->enable_mask))
            continue;

        int_ret |= dispatch_fixed_event(ev, i);
    }

    return int_ret;
}

struct gpe_native_handler {
    uacpi_gpe_handler cb;
    uacpi_handle ctx;

    /*
     * Preserved values to be used for state restoration if this handler is
     * removed at any point.
     */
    uacpi_handle previous_handler;
    uacpi_u8 previous_triggering : 1;
    uacpi_u8 previous_handler_type : 3;
    uacpi_u8 previously_enabled : 1;
};

struct gpe_implicit_notify_handler {
    struct gpe_implicit_notify_handler *next;
    uacpi_namespace_node *device;
};

#define EVENTS_PER_GPE_REGISTER 8

/*
 * NOTE:
 * This API and the handler types are inspired by ACPICA, let's not reinvent
 * the wheel and instead follow a similar path that people ended up finding
 * useful after years of dealing with ACPI. Obviously credit goes to them for
 * inventing "implicit notify" and other neat APIs.
 */
enum gpe_handler_type {
    GPE_HANDLER_TYPE_NONE = 0,
    GPE_HANDLER_TYPE_AML_HANDLER = 1,
    GPE_HANDLER_TYPE_NATIVE_HANDLER = 2,
    GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW = 3,
    GPE_HANDLER_TYPE_IMPLICIT_NOTIFY = 4,
};

struct gp_event {
    union {
        struct gpe_native_handler *native_handler;
        struct gpe_implicit_notify_handler *implicit_handler;
        uacpi_namespace_node *aml_handler;
        uacpi_handle *any_handler;
    };

    struct gpe_register *reg;
    uacpi_u16 idx;

    // "reference count" of the number of times this event has been enabled
    uacpi_u8 num_users;

    uacpi_u8 handler_type : 3;
    uacpi_u8 triggering : 1;
    uacpi_u8 wake : 1;
    uacpi_u8 block_interrupts : 1;
};

struct gpe_register {
    struct acpi_gas status;
    struct acpi_gas enable;

    uacpi_u8 runtime_mask;
    uacpi_u8 wake_mask;
    uacpi_u8 masked_mask;
    uacpi_u8 current_mask;

    uacpi_u16 base_idx;
};

struct gpe_block {
    struct gpe_block *prev, *next;

    /*
     * Technically this can only refer to \_GPE, but there's also apparently
     * a "GPE Block Device" with the ID "ACPI0006", which nobody seems to
     * actually use. We still keep the possibility open in case someone
     * eventually does, so it is supported here.
     */
    uacpi_namespace_node *device_node;

    struct gpe_register *registers;
    struct gp_event *events;
    struct gpe_interrupt_ctx *irq_ctx;

    uacpi_u16 num_registers;
    uacpi_u16 num_events;
    uacpi_u16 base_idx;
};

struct gpe_interrupt_ctx {
    struct gpe_interrupt_ctx *prev, *next;

    struct gpe_block *gpe_head;
    uacpi_handle irq_handle;
    uacpi_u32 irq;
};
static struct gpe_interrupt_ctx *gpe_interrupt_head;

uacpi_u8 gpe_get_mask(struct gp_event *event)
{
    return 1 << (event->idx - event->reg->base_idx);
}
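
/*
 * Worked example of the mask math above: for a register with base_idx 0x10,
 * GPE 0x16 maps to bit (0x16 - 0x10) == 6, i.e. mask 0x40 in both the
 * status and enable sub-registers of that register pair.
 */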

enum gpe_state {
    GPE_STATE_ENABLED,
    GPE_STATE_ENABLED_CONDITIONALLY,
    GPE_STATE_DISABLED,
};

static uacpi_status set_gpe_state(struct gp_event *event, enum gpe_state state)
{
    uacpi_status ret;
    struct gpe_register *reg = event->reg;
    uacpi_u64 enable_mask;
    uacpi_u8 event_bit;

    event_bit = gpe_get_mask(event);
    if (reg->masked_mask & event_bit)
        return UACPI_STATUS_OK;

    if (state == GPE_STATE_ENABLED_CONDITIONALLY) {
        if (!(reg->current_mask & event_bit))
            return UACPI_STATUS_OK;

        state = GPE_STATE_ENABLED;
    }

    ret = uacpi_gas_read(&reg->enable, &enable_mask);
    if (uacpi_unlikely_error(ret))
        return ret;

    switch (state) {
    case GPE_STATE_ENABLED:
        enable_mask |= event_bit;
        break;
    case GPE_STATE_DISABLED:
        enable_mask &= ~event_bit;
        break;
    default:
        return UACPI_STATUS_INVALID_ARGUMENT;
    }

    return uacpi_gas_write(&reg->enable, enable_mask);
}

static uacpi_status clear_gpe(struct gp_event *event)
{
    struct gpe_register *reg = event->reg;

    return uacpi_gas_write(&reg->status, gpe_get_mask(event));
}

static uacpi_status restore_gpe(struct gp_event *event)
{
    uacpi_status ret;

    if (event->triggering == UACPI_GPE_TRIGGERING_LEVEL) {
        ret = clear_gpe(event);
        if (uacpi_unlikely_error(ret))
            return ret;
    }

    ret = set_gpe_state(event, GPE_STATE_ENABLED_CONDITIONALLY);
    event->block_interrupts = UACPI_FALSE;

    return ret;
}

static void async_restore_gpe(uacpi_handle opaque)
{
    uacpi_status ret;
    struct gp_event *event = opaque;

    ret = restore_gpe(event);
    if (uacpi_unlikely_error(ret)) {
        uacpi_error("unable to restore GPE(%02X): %s\n",
                    event->idx, uacpi_status_to_string(ret));
    }
}

static void async_run_gpe_handler(uacpi_handle opaque)
{
    uacpi_status ret;
    struct gp_event *event = opaque;

    switch (event->handler_type) {
    case GPE_HANDLER_TYPE_AML_HANDLER: {
        uacpi_object *method_obj;

        method_obj = uacpi_namespace_node_get_object(event->aml_handler);
        if (uacpi_unlikely(method_obj == UACPI_NULL ||
                           method_obj->type != UACPI_OBJECT_METHOD)) {
            uacpi_error("GPE(%02X) has invalid or deleted AML handler\n",
                        event->idx);
            break;
        }

        uacpi_trace("executing GPE(%02X) handler %.4s\n",
                    event->idx, event->aml_handler->name.text);

        ret = uacpi_execute_control_method(
            event->aml_handler, method_obj->method, UACPI_NULL, UACPI_NULL
        );
        if (uacpi_unlikely_error(ret)) {
            uacpi_error(
                "error while executing GPE(%02X) handler %.4s: %s\n",
                event->idx, event->aml_handler->name.text,
                uacpi_status_to_string(ret)
            );
            break;
        }
        break;
    }

    case GPE_HANDLER_TYPE_IMPLICIT_NOTIFY: {
        struct gpe_implicit_notify_handler *handler;

        handler = event->implicit_handler;
        while (handler) {
            /*
             * 2 - Device Wake. Used to notify OSPM that the device has
             * signaled its wake event, and that OSPM needs to notify the
             * OSPM-native device driver for the device.
             */
            uacpi_notify_all(handler->device, 2);
            handler = handler->next;
        }
        break;
    }

    default:
        break;
    }

    /*
     * We schedule the work as NOTIFICATION to make sure all other
     * notifications finish before this GPE is re-enabled.
     */
    ret = uacpi_kernel_schedule_work(
        UACPI_WORK_NOTIFICATION, async_restore_gpe, event
    );
    if (uacpi_unlikely_error(ret)) {
        uacpi_error("unable to schedule GPE(%02X) restore: %s\n",
                    event->idx, uacpi_status_to_string(ret));
        async_restore_gpe(event);
    }
}

static uacpi_interrupt_ret dispatch_gpe(
    uacpi_namespace_node *device_node, struct gp_event *event
)
{
    uacpi_status ret;
    uacpi_interrupt_ret int_ret = UACPI_INTERRUPT_NOT_HANDLED;

    /*
     * For raw handlers we don't do any management whatsoever, we just let
     * the handler know a GPE has triggered and let it handle disable/enable
     * as well as clearing.
     */
    if (event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW) {
        return event->native_handler->cb(
            event->native_handler->ctx, device_node, event->idx
        );
    }

    ret = set_gpe_state(event, GPE_STATE_DISABLED);
    if (uacpi_unlikely_error(ret)) {
        uacpi_error("failed to disable GPE(%02X): %s\n",
                    event->idx, uacpi_status_to_string(ret));
        return int_ret;
    }

    event->block_interrupts = UACPI_TRUE;

    if (event->triggering == UACPI_GPE_TRIGGERING_EDGE) {
        ret = clear_gpe(event);
        if (uacpi_unlikely_error(ret)) {
            uacpi_error("unable to clear GPE(%02X): %s\n",
                        event->idx, uacpi_status_to_string(ret));
            set_gpe_state(event, GPE_STATE_ENABLED_CONDITIONALLY);
            return int_ret;
        }
    }

    switch (event->handler_type) {
    case GPE_HANDLER_TYPE_NATIVE_HANDLER:
        int_ret = event->native_handler->cb(
            event->native_handler->ctx, device_node, event->idx
        );
        if (!(int_ret & UACPI_GPE_REENABLE))
            break;

        ret = restore_gpe(event);
        if (uacpi_unlikely_error(ret)) {
            uacpi_error("unable to restore GPE(%02X): %s\n",
                        event->idx, uacpi_status_to_string(ret));
        }
        break;

    case GPE_HANDLER_TYPE_AML_HANDLER:
    case GPE_HANDLER_TYPE_IMPLICIT_NOTIFY:
        ret = uacpi_kernel_schedule_work(
            UACPI_WORK_GPE_EXECUTION, async_run_gpe_handler, event
        );
        if (uacpi_unlikely_error(ret)) {
            uacpi_warn(
                "unable to schedule GPE(%02X) for execution: %s\n",
                event->idx, uacpi_status_to_string(ret)
            );
        }
        break;

    default:
        uacpi_warn("GPE(%02X) fired but no handler, keeping disabled\n",
                   event->idx);
        break;
    }

    return UACPI_INTERRUPT_HANDLED;
}

static uacpi_interrupt_ret detect_gpes(struct gpe_block *block)
{
    uacpi_status ret;
    uacpi_interrupt_ret int_ret = UACPI_INTERRUPT_NOT_HANDLED;
    struct gpe_register *reg;
    struct gp_event *event;
    uacpi_u64 status, enable;
    uacpi_size i, j;

    while (block) {
        for (i = 0; i < block->num_registers; ++i) {
            reg = &block->registers[i];

            if (!reg->runtime_mask && !reg->wake_mask)
                continue;

            ret = uacpi_gas_read(&reg->status, &status);
            if (uacpi_unlikely_error(ret))
                return int_ret;

            ret = uacpi_gas_read(&reg->enable, &enable);
            if (uacpi_unlikely_error(ret))
                return int_ret;

            if (status == 0)
                continue;

            for (j = 0; j < EVENTS_PER_GPE_REGISTER; ++j) {
                if (!((status & enable) & (1ull << j)))
                    continue;

                event = &block->events[j + i * EVENTS_PER_GPE_REGISTER];
                int_ret |= dispatch_gpe(block->device_node, event);
            }
        }

        block = block->next;
    }

    return int_ret;
}

static uacpi_status maybe_dispatch_gpe(
    uacpi_namespace_node *gpe_device, struct gp_event *event
)
{
    uacpi_status ret;
    struct gpe_register *reg = event->reg;
    uacpi_u64 status;

    ret = uacpi_gas_read(&reg->status, &status);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (!(status & gpe_get_mask(event)))
        return ret;

    dispatch_gpe(gpe_device, event);
    return ret;
}

static uacpi_interrupt_ret handle_gpes(uacpi_handle opaque)
{
    struct gpe_interrupt_ctx *ctx = opaque;

    if (uacpi_unlikely(ctx == UACPI_NULL))
        return UACPI_INTERRUPT_NOT_HANDLED;

    return detect_gpes(ctx->gpe_head);
}

static uacpi_status find_or_create_gpe_interrupt_ctx(
    uacpi_u32 irq, struct gpe_interrupt_ctx **out_ctx
)
{
    uacpi_status ret;
    struct gpe_interrupt_ctx *entry = gpe_interrupt_head;

    while (entry) {
        if (entry->irq == irq) {
            *out_ctx = entry;
            return UACPI_STATUS_OK;
        }

        entry = entry->next;
    }

    entry = uacpi_kernel_calloc(1, sizeof(*entry));
    if (uacpi_unlikely(entry == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    /*
     * The SCI interrupt is installed by other code and is responsible for
     * more things than just GPE handling. Don't install it here.
     */
    if (irq != g_uacpi_rt_ctx.fadt.sci_int) {
        ret = uacpi_kernel_install_interrupt_handler(
            irq, handle_gpes, entry, &entry->irq_handle
        );
        if (uacpi_unlikely_error(ret)) {
            uacpi_free(entry, sizeof(*entry));
            return ret;
        }
    }

    entry->irq = irq;
    entry->next = gpe_interrupt_head;
    gpe_interrupt_head = entry;

    *out_ctx = entry;
    return UACPI_STATUS_OK;
}

static void gpe_release_implicit_notify_handlers(struct gp_event *event)
{
    struct gpe_implicit_notify_handler *handler, *next_handler;

    handler = event->implicit_handler;
    while (handler) {
        next_handler = handler->next;
        uacpi_free(handler, sizeof(*handler));
        handler = next_handler;
    }

    event->implicit_handler = UACPI_NULL;
}

static void uninstall_gpe_block(struct gpe_block *block)
{
    if (block->prev)
        block->prev->next = block->next;

    if (block->irq_ctx) {
        struct gpe_interrupt_ctx *ctx = block->irq_ctx;

        // Are we the first GPE block?
        if (block == ctx->gpe_head) {
            ctx->gpe_head = ctx->gpe_head->next;
        } else {
            struct gpe_block *prev_block = ctx->gpe_head;

            // We're not, do a search
            while (prev_block) {
                if (prev_block->next == block) {
                    prev_block->next = block->next;
                    break;
                }

                prev_block = prev_block->next;
            }
        }

        // This GPE block was the last user of this interrupt context,
        // remove it
        if (ctx->gpe_head == UACPI_NULL) {
            if (ctx->prev)
                ctx->prev->next = ctx->next;

            if (ctx->irq != g_uacpi_rt_ctx.fadt.sci_int) {
                uacpi_kernel_uninstall_interrupt_handler(
                    handle_gpes, ctx->irq_handle
                );
            }

            uacpi_free(block->irq_ctx, sizeof(*block->irq_ctx));
        }
    }

    if (block->registers != UACPI_NULL) {
        uacpi_size i;
        struct gpe_register *reg;

        for (i = 0; i < block->num_registers; ++i) {
            reg = &block->registers[i];

            if (reg->current_mask)
                uacpi_gas_write(&reg->enable, 0x00);
        }
    }

    if (block->events != UACPI_NULL) {
        uacpi_size i;
        struct gp_event *event;

        for (i = 0; i < block->num_events; ++i) {
            event = &block->events[i];

            switch (event->handler_type) {
            case GPE_HANDLER_TYPE_NONE:
            case GPE_HANDLER_TYPE_AML_HANDLER:
                break;

            case GPE_HANDLER_TYPE_NATIVE_HANDLER:
            case GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW:
                uacpi_free(event->native_handler,
                           sizeof(*event->native_handler));
                break;

            case GPE_HANDLER_TYPE_IMPLICIT_NOTIFY: {
                gpe_release_implicit_notify_handlers(event);
                break;
            }

            default:
                break;
            }
        }
    }

    uacpi_free(block->registers,
               sizeof(*block->registers) * block->num_registers);
    uacpi_free(block->events,
               sizeof(*block->events) * block->num_events);
    uacpi_free(block, sizeof(*block));
}

static struct gp_event *gpe_from_block(struct gpe_block *block, uacpi_u16 idx)
{
    uacpi_u16 offset;

    if (idx < block->base_idx)
        return UACPI_NULL;

    offset = idx - block->base_idx;
    if (offset >= block->num_events)
        return UACPI_NULL;

    return &block->events[offset];
}

struct gpe_match_ctx {
    struct gpe_block *block;
    uacpi_u32 matched_count;
    uacpi_bool post_dynamic_table_load;
};

static uacpi_ns_iteration_decision do_match_gpe_methods(
    uacpi_handle opaque, uacpi_namespace_node *node
)
{
    uacpi_status ret;
    struct gpe_match_ctx *ctx = opaque;
    struct gp_event *event;
    uacpi_object *object;
    uacpi_u8 triggering;
    uacpi_u64 idx;

    object = uacpi_namespace_node_get_object(node);
    if (object->type != UACPI_OBJECT_METHOD)
        return UACPI_NS_ITERATION_DECISION_CONTINUE;

    if (node->name.text[0] != '_')
        return UACPI_NS_ITERATION_DECISION_CONTINUE;

    switch (node->name.text[1]) {
    case 'L':
        triggering = UACPI_GPE_TRIGGERING_LEVEL;
        break;
    case 'E':
        triggering = UACPI_GPE_TRIGGERING_EDGE;
        break;
    default:
        return UACPI_NS_ITERATION_DECISION_CONTINUE;
    }

    ret = uacpi_string_to_integer(
        &node->name.text[2], 2, UACPI_BASE_HEX, &idx
    );
    if (uacpi_unlikely_error(ret)) {
        uacpi_trace("invalid GPE method name %.4s, ignored\n",
                    node->name.text);
        return UACPI_NS_ITERATION_DECISION_CONTINUE;
    }

    event = gpe_from_block(ctx->block, idx);
    if (event == UACPI_NULL)
        return UACPI_NS_ITERATION_DECISION_CONTINUE;

    switch (event->handler_type) {
    /*
     * This event had implicit notify configured, but that is no longer
     * needed now that we have an actual AML handler. Free the implicit
     * notify list and switch this event to AML mode.
     */
    case GPE_HANDLER_TYPE_IMPLICIT_NOTIFY:
        gpe_release_implicit_notify_handlers(event);
        UACPI_FALLTHROUGH;
    case GPE_HANDLER_TYPE_NONE:
        event->aml_handler = node;
        event->handler_type = GPE_HANDLER_TYPE_AML_HANDLER;
        break;

    case GPE_HANDLER_TYPE_AML_HANDLER:
        // This is okay, since we're re-running the detection code
        if (!ctx->post_dynamic_table_load) {
            uacpi_warn(
                "GPE(%02X) already matched %.4s, skipping %.4s\n",
                (uacpi_u32)idx, event->aml_handler->name.text,
                node->name.text
            );
        }
        return UACPI_NS_ITERATION_DECISION_CONTINUE;

    case GPE_HANDLER_TYPE_NATIVE_HANDLER:
    case GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW:
        uacpi_trace(
            "not assigning GPE(%02X) to %.4s, override "
            "installed by user\n", (uacpi_u32)idx, node->name.text
        );
        UACPI_FALLTHROUGH;
    default:
        return UACPI_NS_ITERATION_DECISION_CONTINUE;
    }

    uacpi_trace("assigned GPE(%02X) -> %.4s\n",
                (uacpi_u32)idx, node->name.text);
    event->triggering = triggering;
    ctx->matched_count++;

    return UACPI_NS_ITERATION_DECISION_CONTINUE;
}
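
/*
 * For example, a method named _L1B under the GPE device is matched as the
 * AML handler for level-triggered GPE 0x1B, while _E02 would be matched as
 * the handler for edge-triggered GPE 0x02.
 */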

uacpi_status uacpi_events_match_post_dynamic_table_load(void)
{
    struct gpe_match_ctx match_ctx = {
        .post_dynamic_table_load = UACPI_TRUE,
    };

    struct gpe_interrupt_ctx *irq_ctx = gpe_interrupt_head;

    while (irq_ctx) {
        match_ctx.block = irq_ctx->gpe_head;

        while (match_ctx.block) {
            uacpi_namespace_for_each_node_depth_first(
                match_ctx.block->device_node, do_match_gpe_methods,
                &match_ctx
            );
            match_ctx.block = match_ctx.block->next;
        }

        irq_ctx = irq_ctx->next;
    }

    if (match_ctx.matched_count) {
        uacpi_info("matched %u additional GPEs post dynamic table load\n",
                   match_ctx.matched_count);
    }

    return UACPI_STATUS_OK;
}

static uacpi_status create_gpe_block(
    uacpi_namespace_node *device_node, uacpi_u32 irq, uacpi_u16 base_idx,
    uacpi_u64 address, uacpi_u8 address_space_id, uacpi_u16 num_registers
)
{
    uacpi_status ret = UACPI_STATUS_OUT_OF_MEMORY;
    struct gpe_match_ctx match_ctx = { 0 };
    struct gpe_block *block;
    struct gpe_register *reg;
    struct gp_event *event;
    uacpi_size i, j;

    block = uacpi_kernel_calloc(1, sizeof(*block));
    if (uacpi_unlikely(block == UACPI_NULL))
        return ret;

    block->device_node = device_node;
    block->base_idx = base_idx;

    block->num_registers = num_registers;
    block->registers = uacpi_kernel_calloc(
        num_registers, sizeof(*block->registers)
    );
    if (uacpi_unlikely(block->registers == UACPI_NULL))
        goto error_out;

    block->num_events = num_registers * EVENTS_PER_GPE_REGISTER;
    block->events = uacpi_kernel_calloc(
        block->num_events, sizeof(*block->events)
    );
    if (uacpi_unlikely(block->events == UACPI_NULL))
        goto error_out;

    for (reg = block->registers, event = block->events, i = 0;
         i < num_registers; ++i, ++reg) {

        /*
         * Initialize this register pair as well as all the events within
         * it.
         *
         * Each register has two sub-registers: status & enable, 8 bits
         * each. Each bit corresponds to one event that we initialize below.
         */
        reg->base_idx = base_idx + (i * EVENTS_PER_GPE_REGISTER);

        reg->status.address = address + i;
        reg->status.address_space_id = address_space_id;
        reg->status.register_bit_width = 8;

        reg->enable.address = address + num_registers + i;
        reg->enable.address_space_id = address_space_id;
        reg->enable.register_bit_width = 8;

        for (j = 0; j < EVENTS_PER_GPE_REGISTER; ++j, ++event) {
            event->idx = reg->base_idx + j;
            event->reg = reg;
        }

        /*
         * Disable all GPEs in this register & clear anything that might be
         * pending from earlier.
         */
        ret = uacpi_gas_write(&reg->enable, 0x00);
        if (uacpi_unlikely_error(ret))
            goto error_out;

        ret = uacpi_gas_write(&reg->status, 0xFF);
        if (uacpi_unlikely_error(ret))
            goto error_out;
    }

    ret = find_or_create_gpe_interrupt_ctx(irq, &block->irq_ctx);
    if (uacpi_unlikely_error(ret))
        goto error_out;

    block->next = block->irq_ctx->gpe_head;
    block->irq_ctx->gpe_head = block;
    match_ctx.block = block;

    uacpi_namespace_for_each_node_depth_first(
        device_node, do_match_gpe_methods, &match_ctx
    );

    uacpi_trace(
        "initialized GPE block %.4s[%d->%d], %d AML handlers (IRQ %d)\n",
        device_node->name.text, base_idx, base_idx + block->num_events,
        match_ctx.matched_count, irq
    );
    return UACPI_STATUS_OK;

error_out:
    uninstall_gpe_block(block);
    return ret;
}
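
/*
 * Layout example for the address math above (addresses made up for
 * illustration): a block at 0x1820 with num_registers == 4 puts the status
 * bytes at 0x1820..0x1823 and the enable bytes at 0x1824..0x1827, covering
 * 4 * EVENTS_PER_GPE_REGISTER == 32 events.
 */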

enum gpe_block_iteration_decision {
    GPE_BLOCK_ITERATION_DECISION_BREAK,
    GPE_BLOCK_ITERATION_DECISION_CONTINUE,
};

typedef enum gpe_block_iteration_decision
(*gpe_block_iteration_callback)(struct gpe_block*, uacpi_handle);

static void for_each_gpe_block(
    gpe_block_iteration_callback cb, uacpi_handle handle
)
{
    enum gpe_block_iteration_decision decision;
    struct gpe_interrupt_ctx *irq_ctx = gpe_interrupt_head;
    struct gpe_block *block;

    while (irq_ctx) {
        block = irq_ctx->gpe_head;

        while (block) {
            decision = cb(block, handle);
            if (decision == GPE_BLOCK_ITERATION_DECISION_BREAK)
                return;

            block = block->next;
        }

        irq_ctx = irq_ctx->next;
    }
}

struct gpe_search_ctx {
    uacpi_namespace_node *gpe_device;
    uacpi_u16 idx;
    struct gpe_block *out_block;
    struct gp_event *out_event;
};

static enum gpe_block_iteration_decision do_find_gpe(
    struct gpe_block *block, uacpi_handle opaque
)
{
    struct gpe_search_ctx *ctx = opaque;

    if (block->device_node != ctx->gpe_device)
        return GPE_BLOCK_ITERATION_DECISION_CONTINUE;

    ctx->out_block = block;
    ctx->out_event = gpe_from_block(block, ctx->idx);
    if (ctx->out_event == UACPI_NULL)
        return GPE_BLOCK_ITERATION_DECISION_CONTINUE;

    return GPE_BLOCK_ITERATION_DECISION_BREAK;
}

static struct gp_event *get_gpe(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx
)
{
    struct gpe_search_ctx ctx = {
        .gpe_device = gpe_device,
        .idx = idx,
    };

    for_each_gpe_block(do_find_gpe, &ctx);
    return ctx.out_event;
}

static uacpi_status gpe_remove_user(struct gp_event *event)
{
    uacpi_status ret = UACPI_STATUS_OK;

    if (uacpi_unlikely(event->num_users == 0))
        return UACPI_STATUS_INVALID_ARGUMENT;

    if (--event->num_users == 0) {
        event->reg->runtime_mask &= ~gpe_get_mask(event);
        event->reg->current_mask = event->reg->runtime_mask;

        ret = set_gpe_state(event, GPE_STATE_DISABLED);
        if (uacpi_unlikely_error(ret))
            event->num_users++;
    }

    return ret;
}

enum event_clear_if_first {
    EVENT_CLEAR_IF_FIRST_YES,
    EVENT_CLEAR_IF_FIRST_NO,
};

static uacpi_status gpe_add_user(
    struct gp_event *event, enum event_clear_if_first clear_if_first
)
{
    uacpi_status ret = UACPI_STATUS_OK;

    if (uacpi_unlikely(event->num_users == 0xFF))
        return UACPI_STATUS_INVALID_ARGUMENT;

    if (++event->num_users == 1) {
        if (clear_if_first == EVENT_CLEAR_IF_FIRST_YES)
            clear_gpe(event);

        event->reg->runtime_mask |= gpe_get_mask(event);
        event->reg->current_mask = event->reg->runtime_mask;

        ret = set_gpe_state(event, GPE_STATE_ENABLED);
        if (uacpi_unlikely_error(ret))
            event->num_users--;
    }

    return ret;
}
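
/*
 * Note that the enable state is reference counted: if two subsystems enable
 * the same event, num_users ends up at 2 and the event is only disabled in
 * hardware once both of them have dropped their reference again.
 */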

const uacpi_char *uacpi_gpe_triggering_to_string(
    uacpi_gpe_triggering triggering
)
{
    switch (triggering) {
    case UACPI_GPE_TRIGGERING_EDGE:
        return "edge";
    case UACPI_GPE_TRIGGERING_LEVEL:
        return "level";
    default:
        return "invalid";
    }
}

static uacpi_status do_install_gpe_handler(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx,
    uacpi_gpe_triggering triggering, enum gpe_handler_type type,
    uacpi_gpe_handler handler, uacpi_handle ctx
)
{
    struct gp_event *event;
    struct gpe_native_handler *native_handler;

    if (uacpi_unlikely(triggering > UACPI_GPE_TRIGGERING_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;

    if (gpe_device == UACPI_NULL) {
        gpe_device = uacpi_namespace_get_predefined(
            UACPI_PREDEFINED_NAMESPACE_GPE
        );
    }

    event = get_gpe(gpe_device, idx);
    if (uacpi_unlikely(event == UACPI_NULL))
        return UACPI_STATUS_NOT_FOUND;

    if (event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER ||
        event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW)
        return UACPI_STATUS_ALREADY_EXISTS;

    native_handler = uacpi_kernel_alloc(sizeof(*native_handler));
    if (uacpi_unlikely(native_handler == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    native_handler->cb = handler;
    native_handler->ctx = ctx;
    native_handler->previous_handler = event->any_handler;
    native_handler->previous_handler_type = event->handler_type;
    native_handler->previous_triggering = event->triggering;
    native_handler->previously_enabled = UACPI_FALSE;

    if ((event->handler_type == GPE_HANDLER_TYPE_AML_HANDLER ||
         event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) &&
        event->num_users != 0) {
        native_handler->previously_enabled = UACPI_TRUE;
        gpe_remove_user(event);

        if (uacpi_unlikely(event->triggering != triggering)) {
            uacpi_warn(
                "GPE(%02X) user handler claims %s triggering, originally "
                "configured as %s\n", idx,
                uacpi_gpe_triggering_to_string(triggering),
                uacpi_gpe_triggering_to_string(event->triggering)
            );
        }
    }

    event->native_handler = native_handler;
    event->handler_type = type;
    event->triggering = triggering;
    return UACPI_STATUS_OK;
}

uacpi_status uacpi_install_gpe_handler(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx,
    uacpi_gpe_triggering triggering, uacpi_gpe_handler handler,
    uacpi_handle ctx
)
{
    return do_install_gpe_handler(
        gpe_device, idx, triggering, GPE_HANDLER_TYPE_NATIVE_HANDLER,
        handler, ctx
    );
}
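
/*
 * Usage sketch (illustrative only, the GPE number and triggering are made
 * up; a real driver would take them from its hardware description): an
 * embedded controller driver claiming its GPE and then enabling it.
 *
 *     static uacpi_interrupt_ret ec_gpe_handler(
 *         uacpi_handle ctx, uacpi_namespace_node *gpe_device, uacpi_u16 idx
 *     )
 *     {
 *         // query the EC status, dispatch _Qxx work, etc.
 *         return UACPI_INTERRUPT_HANDLED;
 *     }
 *
 *     uacpi_install_gpe_handler(
 *         UACPI_NULL, 0x16, UACPI_GPE_TRIGGERING_EDGE,
 *         ec_gpe_handler, UACPI_NULL
 *     );
 *     uacpi_enable_gpe(UACPI_NULL, 0x16);
 */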

uacpi_status uacpi_install_gpe_handler_raw(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx,
    uacpi_gpe_triggering triggering, uacpi_gpe_handler handler,
    uacpi_handle ctx
)
{
    return do_install_gpe_handler(
        gpe_device, idx, triggering, GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW,
        handler, ctx
    );
}
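
/*
 * A raw handler, in contrast, is fully responsible for event management
 * itself, as described above dispatch_gpe(). A minimal sketch (names
 * illustrative):
 *
 *     static uacpi_interrupt_ret my_raw_handler(
 *         uacpi_handle ctx, uacpi_namespace_node *gpe_device, uacpi_u16 idx
 *     )
 *     {
 *         // ...service the hardware first, then acknowledge the GPE...
 *         uacpi_clear_gpe(gpe_device, idx);
 *         return UACPI_INTERRUPT_HANDLED;
 *     }
 */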

static uacpi_bool gpe_needs_polling(struct gp_event *event)
{
    return event->num_users && event->triggering == UACPI_GPE_TRIGGERING_EDGE;
}

static enum gpe_block_iteration_decision do_initialize_gpe_block(
    struct gpe_block *block, uacpi_handle opaque
)
{
    uacpi_status ret;
    uacpi_bool *poll_blocks = opaque;
    uacpi_size i, j, count_enabled = 0;
    struct gp_event *event;

    for (i = 0; i < block->num_registers; ++i) {
        for (j = 0; j < EVENTS_PER_GPE_REGISTER; ++j) {
            event = &block->events[j + i * EVENTS_PER_GPE_REGISTER];

            if (event->wake ||
                event->handler_type != GPE_HANDLER_TYPE_AML_HANDLER)
                continue;

            ret = gpe_add_user(event, EVENT_CLEAR_IF_FIRST_NO);
            if (uacpi_unlikely_error(ret)) {
                uacpi_warn("failed to enable GPE(%02X): %s\n",
                           event->idx, uacpi_status_to_string(ret));
                continue;
            }

            *poll_blocks |= gpe_needs_polling(event);
            count_enabled++;
        }
    }

    if (count_enabled) {
        uacpi_info(
            "enabled %zu GPEs in block %.4s@[%d->%d]\n",
            count_enabled, block->device_node->name.text,
            block->base_idx, block->base_idx + block->num_events
        );
    }
    return GPE_BLOCK_ITERATION_DECISION_CONTINUE;
}

uacpi_status uacpi_finalize_gpe_initialization(void)
{
    static uacpi_bool gpes_finalized = UACPI_FALSE;
    uacpi_bool poll_blocks = UACPI_FALSE;

    if (gpes_finalized)
        return UACPI_STATUS_OK;

    for_each_gpe_block(do_initialize_gpe_block, &poll_blocks);
    if (poll_blocks)
        detect_gpes(gpe_interrupt_head->gpe_head);

    gpes_finalized = UACPI_TRUE;
    return UACPI_STATUS_OK;
}
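
/*
 * A sketch of where this call sits in a typical (assumed) bring-up
 * sequence, after the namespace is loaded and initialized so that all
 * _Lxx/_Exx methods have been matched:
 *
 *     uacpi_initialize(0);
 *     uacpi_namespace_load();
 *     uacpi_namespace_initialize();
 *     uacpi_finalize_gpe_initialization();
 */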

static uacpi_status sanitize_device_and_find_gpe(
    uacpi_namespace_node **gpe_device, uacpi_u16 idx,
    struct gp_event **out_event
)
{
    if (*gpe_device == UACPI_NULL) {
        *gpe_device = uacpi_namespace_get_predefined(
            UACPI_PREDEFINED_NAMESPACE_GPE
        );
    }

    *out_event = get_gpe(*gpe_device, idx);
    if (*out_event == UACPI_NULL)
        return UACPI_STATUS_NOT_FOUND;

    return UACPI_STATUS_OK;
}

uacpi_status uacpi_uninstall_gpe_handler(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx,
    uacpi_gpe_handler handler
)
{
    uacpi_status ret;
    struct gp_event *event;
    struct gpe_native_handler *native_handler;

    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (event->handler_type != GPE_HANDLER_TYPE_NATIVE_HANDLER &&
        event->handler_type != GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW)
        return UACPI_STATUS_NOT_FOUND;

    native_handler = event->native_handler;
    if (uacpi_unlikely(native_handler->cb != handler))
        return UACPI_STATUS_INVALID_ARGUMENT;

    event->aml_handler = native_handler->previous_handler;
    event->triggering = native_handler->previous_triggering;
    event->handler_type = native_handler->previous_handler_type;

    if ((event->handler_type == GPE_HANDLER_TYPE_AML_HANDLER ||
         event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) &&
        native_handler->previously_enabled) {
        gpe_add_user(event, EVENT_CLEAR_IF_FIRST_NO);

        if (gpe_needs_polling(event))
            maybe_dispatch_gpe(gpe_device, event);
    }

    uacpi_kernel_wait_for_work_completion();
    uacpi_free(native_handler, sizeof(*native_handler));
    return UACPI_STATUS_OK;
}

uacpi_status uacpi_enable_gpe(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx
)
{
    uacpi_status ret;
    struct gp_event *event;

    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (uacpi_unlikely(event->handler_type == GPE_HANDLER_TYPE_NONE))
        return UACPI_STATUS_NO_HANDLER;

    ret = gpe_add_user(event, EVENT_CLEAR_IF_FIRST_YES);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (gpe_needs_polling(event))
        maybe_dispatch_gpe(gpe_device, event);

    return UACPI_STATUS_OK;
}

uacpi_status uacpi_disable_gpe(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx
)
{
    uacpi_status ret;
    struct gp_event *event;

    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
    if (uacpi_unlikely_error(ret))
        return ret;

    return gpe_remove_user(event);
}

uacpi_status uacpi_clear_gpe(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx
)
{
    uacpi_status ret;
    struct gp_event *event;

    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
    if (uacpi_unlikely_error(ret))
        return ret;

    return clear_gpe(event);
}

static uacpi_status gpe_suspend_resume(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx, enum gpe_state state
)
{
    uacpi_status ret;
    struct gp_event *event;

    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
    if (uacpi_unlikely_error(ret))
        return ret;

    event->block_interrupts = state == GPE_STATE_DISABLED;
    return set_gpe_state(event, state);
}

uacpi_status uacpi_suspend_gpe(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx
)
{
    return gpe_suspend_resume(gpe_device, idx, GPE_STATE_DISABLED);
}

uacpi_status uacpi_resume_gpe(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx
)
{
    return gpe_suspend_resume(gpe_device, idx, GPE_STATE_ENABLED);
}

uacpi_status uacpi_finish_handling_gpe(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx
)
{
    uacpi_status ret;
    struct gp_event *event;

    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
    if (uacpi_unlikely_error(ret))
        return ret;

    return restore_gpe(event);
}

static uacpi_status gpe_mask_unmask(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_bool should_mask
)
{
    uacpi_status ret;
    struct gp_event *event;
    struct gpe_register *reg;
    uacpi_u8 mask;

    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
    if (uacpi_unlikely_error(ret))
        return ret;

    reg = event->reg;
    mask = gpe_get_mask(event);

    if (should_mask) {
        if (reg->masked_mask & mask)
            return UACPI_STATUS_INVALID_ARGUMENT;

        set_gpe_state(event, GPE_STATE_DISABLED);
        reg->masked_mask |= mask;
        return UACPI_STATUS_OK;
    }

    if (!(reg->masked_mask & mask))
        return UACPI_STATUS_INVALID_ARGUMENT;

    reg->masked_mask &= ~mask;
    if (!event->block_interrupts && event->num_users)
        set_gpe_state(event, GPE_STATE_ENABLED_CONDITIONALLY);
    return UACPI_STATUS_OK;
}

uacpi_status uacpi_mask_gpe(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx
)
{
    return gpe_mask_unmask(gpe_device, idx, UACPI_TRUE);
}

uacpi_status uacpi_unmask_gpe(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx
)
{
    return gpe_mask_unmask(gpe_device, idx, UACPI_FALSE);
}

uacpi_status uacpi_setup_gpe_for_wake(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx,
    uacpi_namespace_node *wake_device
)
{
    uacpi_status ret;
    struct gp_event *event;

    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (wake_device != UACPI_NULL) {
        uacpi_object *obj;

        obj = uacpi_namespace_node_get_object(wake_device);
        if (wake_device != uacpi_namespace_root() &&
            obj->type != UACPI_OBJECT_DEVICE)
            return UACPI_STATUS_INVALID_ARGUMENT;

        switch (event->handler_type) {
        case GPE_HANDLER_TYPE_NONE:
            event->handler_type = GPE_HANDLER_TYPE_IMPLICIT_NOTIFY;
            event->triggering = UACPI_GPE_TRIGGERING_LEVEL;
            break;

        case GPE_HANDLER_TYPE_AML_HANDLER:
            /*
             * An AML handler already exists, we expect it to call Notify()
             * as it sees fit. For now just make sure this event is disabled
             * if it had been enabled automatically previously during
             * initialization.
             */
            gpe_remove_user(event);
            break;

        case GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW:
        case GPE_HANDLER_TYPE_NATIVE_HANDLER:
            uacpi_warn(
                "not configuring implicit notify for GPE(%02X) -> %.4s: "
                "a user handler is already installed\n", event->idx,
                wake_device->name.text
            );
            break;

        // We will re-check this below
        case GPE_HANDLER_TYPE_IMPLICIT_NOTIFY:
            break;

        default:
            uacpi_warn("invalid GPE(%02X) handler type: %d\n",
                       event->idx, event->handler_type);
            return UACPI_STATUS_INTERNAL_ERROR;
        }

        /*
         * This GPE has no known AML handler, so we configure it to receive
         * implicit notifications for wake devices when we get a
         * corresponding GPE triggered. Usually it's the job of a matching
         * AML handler, but we didn't find any.
         */
        if (event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) {
            struct gpe_implicit_notify_handler *implicit_handler;

            implicit_handler = event->implicit_handler;
            while (implicit_handler) {
                if (implicit_handler->device == wake_device)
                    return UACPI_STATUS_ALREADY_EXISTS;

                implicit_handler = implicit_handler->next;
            }

            implicit_handler = uacpi_kernel_alloc(sizeof(*implicit_handler));
            if (uacpi_likely(implicit_handler != UACPI_NULL)) {
                implicit_handler->device = wake_device;
                implicit_handler->next = event->implicit_handler;
                event->implicit_handler = implicit_handler;
            } else {
                uacpi_warn(
                    "unable to configure implicit wake for GPE(%02X) -> "
                    "%.4s: out of memory\n", event->idx,
                    wake_device->name.text
                );
            }
        }
    }

    event->wake = UACPI_TRUE;
    return UACPI_STATUS_OK;
}

static uacpi_status gpe_enable_disable_for_wake(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_bool enabled
)
{
    uacpi_status ret;
    struct gp_event *event;
    struct gpe_register *reg;
    uacpi_u8 mask;

    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (!event->wake)
        return UACPI_STATUS_INVALID_ARGUMENT;

    reg = event->reg;
    mask = gpe_get_mask(event);

    if (enabled)
        reg->wake_mask |= mask;
    else
        reg->wake_mask &= ~mask;

    return UACPI_STATUS_OK;
}

uacpi_status uacpi_enable_gpe_for_wake(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx
)
{
    return gpe_enable_disable_for_wake(gpe_device, idx, UACPI_TRUE);
}

uacpi_status uacpi_disable_gpe_for_wake(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx
)
{
    return gpe_enable_disable_for_wake(gpe_device, idx, UACPI_FALSE);
}
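
/*
 * Wake configuration sketch (device and GPE number are illustrative): a
 * driver whose _PRW points at GPE 0x0D would typically do
 *
 *     uacpi_setup_gpe_for_wake(UACPI_NULL, 0x0D, lid_device);
 *     uacpi_enable_gpe_for_wake(UACPI_NULL, 0x0D);
 *
 * and rely on uacpi_enable_all_wake_gpes() on the suspend path to commit
 * the accumulated wake masks to hardware.
 */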

enum gpe_block_action {
    GPE_BLOCK_ACTION_DISABLE_ALL,
    GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME,
    GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE,
    GPE_BLOCK_ACTION_CLEAR_ALL,
};

struct do_for_all_gpes_ctx {
    enum gpe_block_action action;
    uacpi_status ret;
};

static enum gpe_block_iteration_decision do_for_all_gpes(
    struct gpe_block *block, uacpi_handle opaque
)
{
    struct do_for_all_gpes_ctx *ctx = opaque;
    struct gpe_register *reg;
    uacpi_u8 value;
    uacpi_size i;

    for (i = 0; i < block->num_registers; ++i) {
        reg = &block->registers[i];

        switch (ctx->action) {
        case GPE_BLOCK_ACTION_DISABLE_ALL:
            value = 0;
            break;
        case GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME:
            value = reg->runtime_mask & ~reg->masked_mask;
            break;
        case GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE:
            value = reg->wake_mask;
            break;
        case GPE_BLOCK_ACTION_CLEAR_ALL:
            ctx->ret = uacpi_gas_write(&reg->status, 0xFF);
            if (uacpi_unlikely_error(ctx->ret))
                return GPE_BLOCK_ITERATION_DECISION_BREAK;
            continue;
        default:
            continue;
        }

        reg->current_mask = value;
        ctx->ret = uacpi_gas_write(&reg->enable, value);
        if (uacpi_unlikely_error(ctx->ret))
            return GPE_BLOCK_ITERATION_DECISION_BREAK;
    }

    return GPE_BLOCK_ITERATION_DECISION_CONTINUE;
}

uacpi_status uacpi_disable_all_gpes(void)
{
    struct do_for_all_gpes_ctx ctx = {
        .action = GPE_BLOCK_ACTION_DISABLE_ALL,
    };

    for_each_gpe_block(do_for_all_gpes, &ctx);
    return ctx.ret;
}

uacpi_status uacpi_enable_all_runtime_gpes(void)
{
    struct do_for_all_gpes_ctx ctx = {
        .action = GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME,
    };

    for_each_gpe_block(do_for_all_gpes, &ctx);
    return ctx.ret;
}

uacpi_status uacpi_enable_all_wake_gpes(void)
{
    struct do_for_all_gpes_ctx ctx = {
        .action = GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE,
    };

    for_each_gpe_block(do_for_all_gpes, &ctx);
    return ctx.ret;
}
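
/*
 * A sketch of how the helpers above combine on a typical (assumed) S3 entry
 * and exit path:
 *
 *     uacpi_disable_all_gpes();         // stop runtime GPE delivery
 *     uacpi_enable_all_wake_gpes();     // arm only wake-capable events
 *     // ...enter sleep, wake up...
 *     uacpi_disable_all_gpes();
 *     uacpi_enable_all_runtime_gpes();  // restore runtime configuration
 */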

static uacpi_status initialize_gpes(void)
{
    uacpi_status ret;
    uacpi_namespace_node *gpe_node;
    struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;
    uacpi_u8 gpe0_regs = 0, gpe1_regs = 0;

    gpe_node = uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_GPE);

    if (fadt->x_gpe0_blk.address && fadt->gpe0_blk_len) {
        gpe0_regs = fadt->gpe0_blk_len / 2;

        ret = create_gpe_block(
            gpe_node, fadt->sci_int, 0, fadt->x_gpe0_blk.address,
            fadt->x_gpe0_blk.address_space_id, gpe0_regs
        );
        if (uacpi_unlikely_error(ret)) {
            uacpi_error("unable to create FADT GPE block 0: %s\n",
                        uacpi_status_to_string(ret));
        }
    }

    if (fadt->x_gpe1_blk.address && fadt->gpe1_blk_len) {
        gpe1_regs = fadt->gpe1_blk_len / 2;

        if (uacpi_unlikely((gpe0_regs * EVENTS_PER_GPE_REGISTER) >
                           fadt->gpe1_base)) {
            uacpi_error(
                "FADT GPE block 1 [%d->%d] collides with GPE block 0 "
                "[%d->%d], ignoring\n",
                fadt->gpe1_base,
                fadt->gpe1_base + gpe1_regs * EVENTS_PER_GPE_REGISTER,
                0, gpe0_regs * EVENTS_PER_GPE_REGISTER
            );
            gpe1_regs = 0;
            goto out;
        }

        ret = create_gpe_block(
            gpe_node, fadt->sci_int, fadt->gpe1_base,
            fadt->x_gpe1_blk.address, fadt->x_gpe1_blk.address_space_id,
            gpe1_regs
        );
        if (uacpi_unlikely_error(ret)) {
            uacpi_error("unable to create FADT GPE block 1: %s\n",
                        uacpi_status_to_string(ret));
        }
    }

    if (gpe0_regs == 0 && gpe1_regs == 0)
        uacpi_trace("platform has no FADT GPE events\n");

out:
    return UACPI_STATUS_OK;
}
1698 | |
1699 | uacpi_status uacpi_gpe_install_block( |
1700 | uacpi_namespace_node *gpe_device, uacpi_u64 address, |
1701 | uacpi_address_space address_space, uacpi_u16 num_registers, uacpi_u32 irq |
1702 | ) |
1703 | { |
1704 | uacpi_object *obj; |
1705 | |
1706 | obj = uacpi_namespace_node_get_object(node: gpe_device); |
1707 | if (obj == UACPI_NULL || obj->type != UACPI_OBJECT_DEVICE) |
1708 | return UACPI_STATUS_INVALID_ARGUMENT; |
1709 | |
1710 | return create_gpe_block( |
1711 | device_node: gpe_device, irq, base_idx: 0, address, address_space_id: address_space, num_registers |
1712 | ); |
1713 | } |
1714 | |
1715 | uacpi_status uacpi_gpe_uninstall_block( |
1716 | uacpi_namespace_node *gpe_device |
1717 | ) |
1718 | { |
1719 | uacpi_object *obj; |
1720 | struct gpe_search_ctx search_ctx = { |
1721 | .idx = 0, |
1722 | .gpe_device = gpe_device, |
1723 | }; |
1724 | |
1725 | obj = uacpi_namespace_node_get_object(node: gpe_device); |
1726 | if (uacpi_unlikely(obj == UACPI_NULL || obj->type != UACPI_OBJECT_DEVICE)) |
1727 | return UACPI_STATUS_INVALID_ARGUMENT; |
1728 | |
1729 | for_each_gpe_block(cb: do_find_gpe, handle: &search_ctx); |
1730 | if (search_ctx.out_block == UACPI_NULL) |
1731 | return UACPI_STATUS_NOT_FOUND; |
1732 | |
1733 | uninstall_gpe_block(block: search_ctx.out_block); |
1734 | return UACPI_STATUS_OK; |
1735 | } |
1736 | |
static uacpi_interrupt_ret handle_global_lock(uacpi_handle ctx)
{
    uacpi_cpu_flags flags;
    UACPI_UNUSED(ctx);

    if (uacpi_unlikely(!g_uacpi_rt_ctx.has_global_lock)) {
        uacpi_warn("platform has no global lock but a release event "
                   "was fired anyway?\n");
        return UACPI_INTERRUPT_HANDLED;
    }

    flags = uacpi_kernel_spinlock_lock(g_uacpi_rt_ctx.global_lock_spinlock);
    if (!g_uacpi_rt_ctx.global_lock_pending) {
        uacpi_trace("spurious firmware global lock release notification\n");
        goto out;
    }

    uacpi_trace("received a firmware global lock release notification\n");

    uacpi_kernel_signal_event(g_uacpi_rt_ctx.global_lock_event);
    g_uacpi_rt_ctx.global_lock_pending = UACPI_FALSE;

out:
    uacpi_kernel_spinlock_unlock(g_uacpi_rt_ctx.global_lock_spinlock, flags);
    return UACPI_INTERRUPT_HANDLED;
}

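/*
 * The SCI entry point: a single interrupt line multiplexes all fixed
 * events and every GPE block routed to it, so both are dispatched and the
 * results OR-ed together.
 */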
static uacpi_interrupt_ret handle_sci(uacpi_handle ctx)
{
    uacpi_interrupt_ret int_ret = UACPI_INTERRUPT_NOT_HANDLED;

    int_ret |= handle_fixed_events();
    int_ret |= handle_gpes(ctx);

    return int_ret;
}

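/*
 * Bring up the event subsystem: fixed events are initialized (masked), GPE
 * blocks are created from the FADT, the SCI is hooked up to handle_sci(),
 * and, if the platform has one, the global lock release event is wired to
 * handle_global_lock(). This is a no-op on hardware-reduced platforms,
 * which have neither fixed events nor an SCI.
 */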
uacpi_status uacpi_initialize_events(void)
{
    uacpi_status ret;

    if (uacpi_is_hardware_reduced())
        return UACPI_STATUS_OK;

    ret = initialize_fixed_events();
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = initialize_gpes();
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_kernel_install_interrupt_handler(
        g_uacpi_rt_ctx.fadt.sci_int, handle_sci, gpe_interrupt_head,
        &g_uacpi_rt_ctx.sci_handle
    );
    if (uacpi_unlikely_error(ret))
        return ret;

    g_uacpi_rt_ctx.global_lock_event = uacpi_kernel_create_event();
    if (uacpi_unlikely(g_uacpi_rt_ctx.global_lock_event == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    g_uacpi_rt_ctx.global_lock_spinlock = uacpi_kernel_create_spinlock();
    if (uacpi_unlikely(g_uacpi_rt_ctx.global_lock_spinlock == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    ret = uacpi_install_fixed_event_handler(
        UACPI_FIXED_EVENT_GLOBAL_LOCK, handle_global_lock, UACPI_NULL
    );
    if (uacpi_likely_success(ret)) {
        if (uacpi_unlikely(g_uacpi_rt_ctx.facs == UACPI_NULL)) {
            uacpi_uninstall_fixed_event_handler(UACPI_FIXED_EVENT_GLOBAL_LOCK);
            uacpi_warn("platform has global lock but no FACS was provided\n");
            return ret;
        }
        g_uacpi_rt_ctx.has_global_lock = UACPI_TRUE;
    } else if (ret == UACPI_STATUS_HARDWARE_TIMEOUT) {
        // has_global_lock remains set to false
        uacpi_trace("platform has no global lock\n");
        ret = UACPI_STATUS_OK;
    }

    return ret;
}

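/*
 * Install a handler for a fixed event and enable the event in hardware.
 * Only one handler per event is supported; if the event cannot be enabled,
 * the installation is rolled back.
 *
 * Illustrative usage (the handler name and its body are hypothetical):
 *
 *     static uacpi_interrupt_ret on_power_button(uacpi_handle ctx)
 *     {
 *         // ask the OS to begin an orderly shutdown here
 *         return UACPI_INTERRUPT_HANDLED;
 *     }
 *
 *     uacpi_install_fixed_event_handler(
 *         UACPI_FIXED_EVENT_POWER_BUTTON, on_power_button, UACPI_NULL
 *     );
 */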
uacpi_status uacpi_install_fixed_event_handler(
    uacpi_fixed_event event, uacpi_interrupt_handler handler,
    uacpi_handle user
)
{
    uacpi_status ret;
    struct fixed_event_handler *ev;

    if (uacpi_unlikely(event > UACPI_FIXED_EVENT_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;
    if (uacpi_is_hardware_reduced())
        return UACPI_STATUS_OK;

    ev = &fixed_event_handlers[event];

    if (ev->handler != UACPI_NULL)
        return UACPI_STATUS_ALREADY_EXISTS;

    ev->handler = handler;
    ev->ctx = user;

    ret = set_event(event, UACPI_EVENT_ENABLED);
    if (uacpi_unlikely_error(ret)) {
        ev->handler = UACPI_NULL;
        ev->ctx = UACPI_NULL;
        return ret;
    }

    return UACPI_STATUS_OK;
}

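/*
 * Remove a fixed event handler. The event is disabled in hardware before
 * the handler is detached so it cannot fire mid-removal.
 */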
uacpi_status uacpi_uninstall_fixed_event_handler(
    uacpi_fixed_event event
)
{
    uacpi_status ret;
    struct fixed_event_handler *ev;

    if (uacpi_unlikely(event > UACPI_FIXED_EVENT_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;
    if (uacpi_is_hardware_reduced())
        return UACPI_STATUS_OK;

    ev = &fixed_event_handlers[event];

    ret = set_event(event, UACPI_EVENT_DISABLED);
    if (uacpi_unlikely_error(ret))
        return ret;

    ev->handler = UACPI_NULL;
    ev->ctx = UACPI_NULL;

    return UACPI_STATUS_OK;
}

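/*
 * Report the state of a fixed event: whether a handler is installed, plus
 * the live enable and status bits read back from the PM1 registers.
 */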
uacpi_status uacpi_fixed_event_info(
    uacpi_fixed_event event, uacpi_event_info *out_info
)
{
    uacpi_status ret;
    const struct fixed_event *ev;
    uacpi_u64 raw_value;
    uacpi_event_info info = 0;

    if (uacpi_unlikely(event > UACPI_FIXED_EVENT_MAX))
        return UACPI_STATUS_INVALID_ARGUMENT;
    if (uacpi_is_hardware_reduced())
        return UACPI_STATUS_NOT_FOUND;

    if (fixed_event_handlers[event].handler != UACPI_NULL)
        info |= UACPI_EVENT_INFO_HAS_HANDLER;

    ev = &fixed_events[event];

    ret = uacpi_read_register_field(ev->enable_field, &raw_value);
    if (uacpi_unlikely_error(ret))
        return ret;
    if (raw_value)
        info |= UACPI_EVENT_INFO_ENABLED | UACPI_EVENT_INFO_HW_ENABLED;

    ret = uacpi_read_register_field(ev->status_field, &raw_value);
    if (uacpi_unlikely_error(ret))
        return ret;
    if (raw_value)
        info |= UACPI_EVENT_INFO_HW_STATUS;

    *out_info = info;
    return UACPI_STATUS_OK;
}

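/*
 * Report the state of a single GPE: handler presence, the per-register
 * cached runtime/masked/wake state, and the live enable and status bits
 * read from the block's hardware registers.
 */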
uacpi_status uacpi_gpe_info(
    uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_event_info *out_info
)
{
    uacpi_status ret;
    struct gp_event *event;
    struct gpe_register *reg;
    uacpi_u8 mask;
    uacpi_u64 raw_value;
    uacpi_event_info info = 0;

    ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (event->handler_type != GPE_HANDLER_TYPE_NONE)
        info |= UACPI_EVENT_INFO_HAS_HANDLER;

    mask = gpe_get_mask(event);
    reg = event->reg;

    if (reg->runtime_mask & mask)
        info |= UACPI_EVENT_INFO_ENABLED;
    if (reg->masked_mask & mask)
        info |= UACPI_EVENT_INFO_MASKED;
    if (reg->wake_mask & mask)
        info |= UACPI_EVENT_INFO_ENABLED_FOR_WAKE;

    ret = uacpi_gas_read(&reg->enable, &raw_value);
    if (uacpi_unlikely_error(ret))
        return ret;
    if (raw_value & mask)
        info |= UACPI_EVENT_INFO_HW_ENABLED;

    ret = uacpi_gas_read(&reg->status, &raw_value);
    if (uacpi_unlikely_error(ret))
        return ret;
    if (raw_value & mask)
        info |= UACPI_EVENT_INFO_HW_STATUS;

    *out_info = info;
    return UACPI_STATUS_OK;
}

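/*
 * PM1 status bits are write-one-to-clear, so writing this mask to PM1_STS
 * acknowledges every fixed event at once.
 */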
#define PM1_STATUS_BITS ( \
    ACPI_PM1_STS_TMR_STS_MASK | \
    ACPI_PM1_STS_BM_STS_MASK | \
    ACPI_PM1_STS_GBL_STS_MASK | \
    ACPI_PM1_STS_PWRBTN_STS_MASK | \
    ACPI_PM1_STS_SLPBTN_STS_MASK | \
    ACPI_PM1_STS_RTC_STS_MASK | \
    ACPI_PM1_STS_PCIEXP_WAKE_STS_MASK | \
    ACPI_PM1_STS_WAKE_STS_MASK \
)

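/*
 * Acknowledge all pending fixed events and clear the status bits of every
 * GPE in every installed block, typically done before entering a sleep
 * state so that stale events cannot trigger an immediate wake.
 */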
uacpi_status uacpi_clear_all_events(void)
{
    uacpi_status ret;
    struct do_for_all_gpes_ctx ctx = {
        .action = GPE_BLOCK_ACTION_CLEAR_ALL,
    };

    ret = uacpi_write_register(UACPI_REGISTER_PM1_STS, PM1_STATUS_BITS);
    if (uacpi_unlikely_error(ret))
        return ret;

    for_each_gpe_block(do_for_all_gpes, &ctx);
    return ctx.ret;
}

#endif // !UACPI_REDUCED_HARDWARE