#include <uacpi/uacpi.h>
#include <uacpi/acpi.h>

#include <uacpi/internal/log.h>
#include <uacpi/internal/context.h>
#include <uacpi/internal/utilities.h>
#include <uacpi/internal/tables.h>
#include <uacpi/internal/interpreter.h>
#include <uacpi/internal/namespace.h>
#include <uacpi/internal/opregion.h>
#include <uacpi/internal/registers.h>
#include <uacpi/internal/event.h>
#include <uacpi/internal/osi.h>

struct uacpi_runtime_context g_uacpi_rt_ctx = { 0 };

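/*
 * Runtime tuning knobs. A host-side usage sketch (the values below are
 * arbitrary examples, not recommendations): these may be called before
 * uacpi_initialize() to override the defaults, and passing 0 restores the
 * corresponding UACPI_DEFAULT_* value.
 *
 *     uacpi_context_set_loop_timeout(10);
 *     uacpi_context_set_max_call_stack_depth(512);
 */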
void uacpi_context_set_loop_timeout(uacpi_u32 seconds)
{
    if (seconds == 0)
        seconds = UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS;

    g_uacpi_rt_ctx.loop_timeout_seconds = seconds;
}

void uacpi_context_set_max_call_stack_depth(uacpi_u32 depth)
{
    if (depth == 0)
        depth = UACPI_DEFAULT_MAX_CALL_STACK_DEPTH;

    g_uacpi_rt_ctx.max_call_stack_depth = depth;
}

uacpi_u32 uacpi_context_get_loop_timeout(void)
{
    return g_uacpi_rt_ctx.loop_timeout_seconds;
}

const uacpi_char *uacpi_status_to_string(uacpi_status st)
{
    switch (st) {
    case UACPI_STATUS_OK:
        return "no error";
    case UACPI_STATUS_MAPPING_FAILED:
        return "failed to map memory";
    case UACPI_STATUS_OUT_OF_MEMORY:
        return "out of memory";
    case UACPI_STATUS_BAD_CHECKSUM:
        return "bad table checksum";
    case UACPI_STATUS_INVALID_SIGNATURE:
        return "invalid table signature";
    case UACPI_STATUS_NOT_FOUND:
        return "not found";
    case UACPI_STATUS_INVALID_ARGUMENT:
        return "invalid argument";
    case UACPI_STATUS_UNIMPLEMENTED:
        return "unimplemented";
    case UACPI_STATUS_ALREADY_EXISTS:
        return "already exists";
    case UACPI_STATUS_INTERNAL_ERROR:
        return "internal error";
    case UACPI_STATUS_TYPE_MISMATCH:
        return "object type mismatch";
    case UACPI_STATUS_INIT_LEVEL_MISMATCH:
        return "init level too low/high for this action";
    case UACPI_STATUS_NAMESPACE_NODE_DANGLING:
        return "attempting to use a dangling namespace node";
    case UACPI_STATUS_NO_HANDLER:
        return "no handler found";
    case UACPI_STATUS_NO_RESOURCE_END_TAG:
        return "resource template without an end tag";
    case UACPI_STATUS_COMPILED_OUT:
        return "this functionality has been compiled out of this build";
    case UACPI_STATUS_HARDWARE_TIMEOUT:
        return "timed out waiting for hardware response";
    case UACPI_STATUS_TIMEOUT:
        return "wait timed out";
    case UACPI_STATUS_OVERRIDEN:
        return "the requested action has been overridden";
    case UACPI_STATUS_DENIED:
        return "the requested action has been denied";

    case UACPI_STATUS_AML_UNDEFINED_REFERENCE:
        return "AML referenced an undefined object";
    case UACPI_STATUS_AML_INVALID_NAMESTRING:
        return "invalid AML name string";
    case UACPI_STATUS_AML_OBJECT_ALREADY_EXISTS:
        return "object already exists";
    case UACPI_STATUS_AML_INVALID_OPCODE:
        return "invalid AML opcode";
    case UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE:
        return "incompatible AML object type";
    case UACPI_STATUS_AML_BAD_ENCODING:
        return "bad AML instruction encoding";
    case UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX:
        return "out of bounds AML index";
    case UACPI_STATUS_AML_SYNC_LEVEL_TOO_HIGH:
        return "AML attempted to acquire a mutex with a lower sync level";
    case UACPI_STATUS_AML_INVALID_RESOURCE:
        return "invalid resource template encoding or type";
    case UACPI_STATUS_AML_LOOP_TIMEOUT:
        return "hanging AML while loop";
    case UACPI_STATUS_AML_CALL_STACK_DEPTH_LIMIT:
        return "reached maximum AML call stack depth";
    default:
        return "<invalid status>";
    }
}

#ifndef UACPI_REDUCED_HARDWARE
enum hw_mode {
    HW_MODE_ACPI = 0,
    HW_MODE_LEGACY = 1,
};

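/*
 * Note on mode detection and switching (non-reduced hardware): the platform
 * is treated as being in ACPI mode when SMI_CMD is not implemented at all,
 * or when the SCI_EN register field reads back as set. A transition is
 * requested by writing the FADT-provided ACPI_ENABLE/ACPI_DISABLE command to
 * SMI_CMD and then polling SCI_EN until it reflects the desired mode.
 */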
static enum hw_mode read_mode(void)
{
    uacpi_status ret;
    uacpi_u64 raw_value;
    struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;

    if (!fadt->smi_cmd)
        return HW_MODE_ACPI;

    ret = uacpi_read_register_field(UACPI_REGISTER_FIELD_SCI_EN, &raw_value);
    if (uacpi_unlikely_error(ret))
        return HW_MODE_LEGACY;

    return raw_value ? HW_MODE_ACPI : HW_MODE_LEGACY;
}

static uacpi_status set_mode(enum hw_mode mode)
{
    uacpi_status ret;
    uacpi_u64 raw_value, stalled_time = 0;
    struct acpi_fadt *fadt = &g_uacpi_rt_ctx.fadt;

    if (uacpi_unlikely(!fadt->smi_cmd)) {
        uacpi_error("SMI_CMD is not implemented by the firmware\n");
        return UACPI_STATUS_NOT_FOUND;
    }

    if (uacpi_unlikely(!fadt->acpi_enable && !fadt->acpi_disable)) {
        uacpi_error("mode transition is not implemented by the hardware\n");
        return UACPI_STATUS_NOT_FOUND;
    }

    switch (mode) {
    case HW_MODE_ACPI:
        raw_value = fadt->acpi_enable;
        break;
    case HW_MODE_LEGACY:
        raw_value = fadt->acpi_disable;
        break;
    default:
        return UACPI_STATUS_INVALID_ARGUMENT;
    }

    ret = uacpi_write_register(UACPI_REGISTER_SMI_CMD, raw_value);
    if (uacpi_unlikely_error(ret))
        return ret;

    // Allow up to 5 seconds for the hardware to enter the desired mode
    while (stalled_time < (5 * 1000 * 1000)) {
        if (read_mode() == mode)
            return UACPI_STATUS_OK;

        uacpi_kernel_stall(100);
        stalled_time += 100;
    }

    uacpi_error("hardware timed out while changing modes\n");
    return UACPI_STATUS_HARDWARE_TIMEOUT;
}

static uacpi_status enter_mode(enum hw_mode mode)
{
    uacpi_status ret;
    const uacpi_char *mode_str;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_TABLES_LOADED);

    if (uacpi_is_hardware_reduced())
        return UACPI_STATUS_OK;

    mode_str = mode == HW_MODE_LEGACY ? "legacy" : "acpi";

    if (read_mode() == mode) {
        uacpi_trace("%s mode already enabled\n", mode_str);
        return UACPI_STATUS_OK;
    }

    ret = set_mode(mode);
    if (uacpi_unlikely_error(ret)) {
        uacpi_error(
            "unable to enter %s mode: %s\n",
            mode_str, uacpi_status_to_string(ret)
        );
        return ret;
    }

    uacpi_trace("entered %s mode\n", mode_str);
    return ret;
}

uacpi_status uacpi_enter_acpi_mode(void)
{
    return enter_mode(HW_MODE_ACPI);
}

uacpi_status uacpi_leave_acpi_mode(void)
{
    return enter_mode(HW_MODE_LEGACY);
}
#endif

UACPI_PACKED(struct uacpi_rxsdt {
    struct acpi_sdt_hdr hdr;
    uacpi_u8 ptr_bytes[];
})

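/*
 * Walks the RSDT (4-byte entries) or XSDT (8-byte entries) and installs every
 * referenced table. The header is mapped first to learn the real table
 * length, the full table is then re-mapped and checksummed, and each non-NULL
 * entry is handed to the table subsystem as a firmware-provided physical
 * table.
 */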
static uacpi_status initialize_from_rxsdt(uacpi_phys_addr rxsdt_addr,
                                          uacpi_size entry_size)
{
    struct uacpi_rxsdt *rxsdt;
    uacpi_size i, entry_bytes, map_len = sizeof(*rxsdt);
    uacpi_phys_addr entry_addr;
    uacpi_status ret;

    rxsdt = uacpi_kernel_map(rxsdt_addr, map_len);
    if (rxsdt == UACPI_NULL)
        return UACPI_STATUS_MAPPING_FAILED;

    ret = uacpi_check_table_signature(
        rxsdt, entry_size == 8 ? ACPI_XSDT_SIGNATURE : ACPI_RSDT_SIGNATURE
    );
    if (uacpi_unlikely_error(ret))
        goto error_out;

    map_len = rxsdt->hdr.length;
    uacpi_kernel_unmap(rxsdt, sizeof(*rxsdt));

    if (uacpi_unlikely(map_len < (sizeof(*rxsdt) + entry_size)))
        return UACPI_STATUS_INVALID_TABLE_LENGTH;

    // Make sure length is aligned to entry size so we don't OOB
    entry_bytes = map_len - sizeof(*rxsdt);
    entry_bytes &= ~(entry_size - 1);

    rxsdt = uacpi_kernel_map(rxsdt_addr, map_len);
    if (uacpi_unlikely(rxsdt == UACPI_NULL))
        return UACPI_STATUS_MAPPING_FAILED;

    ret = uacpi_verify_table_checksum(rxsdt, map_len);
    if (uacpi_unlikely_error(ret))
        goto error_out;

    for (i = 0; i < entry_bytes; i += entry_size) {
        uacpi_u64 entry_phys_addr_large = 0;
        uacpi_memcpy(&entry_phys_addr_large, &rxsdt->ptr_bytes[i], entry_size);

        if (!entry_phys_addr_large)
            continue;

        entry_addr = uacpi_truncate_phys_addr_with_warn(entry_phys_addr_large);
        ret = uacpi_table_install_physical_with_origin(
            entry_addr, UACPI_TABLE_ORIGIN_FIRMWARE_PHYSICAL, UACPI_NULL
        );
        if (uacpi_unlikely_error(ret))
            return ret;
    }

    ret = UACPI_STATUS_OK;

error_out:
    uacpi_kernel_unmap(rxsdt, map_len);
    return ret;
}

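/*
 * Usage sketch (hypothetical host boot path; field values are examples only):
 * the expected bring-up order is uacpi_initialize(), then
 * uacpi_namespace_load(), then uacpi_namespace_initialize().
 *
 *     uacpi_init_params params = {
 *         .rsdp = rsdp_phys_addr, // RSDP physical address from the bootloader
 *         .log_level = UACPI_LOG_INFO,
 *         .flags = 0,
 *     };
 *
 *     uacpi_status st = uacpi_initialize(&params);
 *     if (uacpi_unlikely_error(st))
 *         return st; // or however the host reports early boot failures
 *
 *     st = uacpi_namespace_load();
 *     ...
 *     st = uacpi_namespace_initialize();
 */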
uacpi_status uacpi_initialize(const uacpi_init_params *params)
{
    uacpi_status ret;
    struct acpi_rsdp *rsdp;
    uacpi_phys_addr rxsdt;
    uacpi_size rxsdt_entry_size;

    UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_EARLY);

    g_uacpi_rt_ctx.init_level = UACPI_INIT_LEVEL_TABLES_LOADED;
    g_uacpi_rt_ctx.is_rev1 = UACPI_TRUE;
    g_uacpi_rt_ctx.last_sleep_typ_a = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.last_sleep_typ_b = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.s0_sleep_typ_a = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.s0_sleep_typ_b = UACPI_SLEEP_TYP_INVALID;
    g_uacpi_rt_ctx.log_level = params->log_level;
    g_uacpi_rt_ctx.flags = params->flags;

    if (g_uacpi_rt_ctx.loop_timeout_seconds == 0)
        uacpi_context_set_loop_timeout(UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS);
    if (g_uacpi_rt_ctx.max_call_stack_depth == 0)
        uacpi_context_set_max_call_stack_depth(UACPI_DEFAULT_MAX_CALL_STACK_DEPTH);

    ret = uacpi_initialize_tables();
    if (uacpi_unlikely_error(ret))
        return ret;

    rsdp = uacpi_kernel_map(params->rsdp, sizeof(struct acpi_rsdp));
    if (rsdp == UACPI_NULL)
        return UACPI_STATUS_MAPPING_FAILED;

    if (rsdp->revision > 1 && rsdp->xsdt_addr &&
        !uacpi_check_flag(UACPI_FLAG_BAD_XSDT))
    {
        rxsdt = uacpi_truncate_phys_addr_with_warn(rsdp->xsdt_addr);
        rxsdt_entry_size = 8;
    } else {
        rxsdt = (uacpi_phys_addr)rsdp->rsdt_addr;
        rxsdt_entry_size = 4;
    }

    uacpi_kernel_unmap(rsdp, sizeof(struct acpi_rsdp));

    if (!rxsdt) {
        uacpi_error("both RSDT & XSDT tables are NULL!\n");
        return UACPI_STATUS_INVALID_ARGUMENT;
    }

    ret = initialize_from_rxsdt(rxsdt, rxsdt_entry_size);
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_initialize_interfaces();
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_namespace_initialize_predefined();
    if (uacpi_unlikely_error(ret))
        return ret;

    uacpi_install_default_address_space_handlers();

    if (uacpi_check_flag(UACPI_FLAG_NO_ACPI_MODE))
        return UACPI_STATUS_OK;

    return uacpi_enter_acpi_mode();
}

struct table_load_stats {
    uacpi_u32 load_counter;
    uacpi_u32 failure_counter;
};

static void trace_table_load_failure(
    struct acpi_sdt_hdr *tbl, uacpi_log_level lvl, uacpi_status ret
)
{
    uacpi_log_lvl(
        lvl,
        "failed to load '%.4s' (OEM ID '%.6s' OEM Table ID '%.8s'): %s\n",
        tbl->signature, tbl->oemid, tbl->oem_table_id,
        uacpi_status_to_string(ret)
    );
}

static uacpi_bool match_ssdt_or_psdt(struct uacpi_installed_table *tbl)
{
    static uacpi_object_name ssdt_signature = {
        .text = { ACPI_SSDT_SIGNATURE },
    };
    static uacpi_object_name psdt_signature = {
        .text = { ACPI_PSDT_SIGNATURE },
    };

    if (tbl->flags & UACPI_TABLE_LOADED)
        return UACPI_FALSE;

    return tbl->signature.id == ssdt_signature.id ||
           tbl->signature.id == psdt_signature.id;
}

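/*
 * Loads the DSDT first and then every SSDT/PSDT that has not been loaded yet,
 * counting executed AML blobs and load failures. Individual table failures
 * are logged but do not abort loading of the remaining tables.
 */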
uacpi_status uacpi_namespace_load(void)
{
    struct uacpi_table tbl;
    uacpi_status ret;
    struct table_load_stats st = { 0 };
    uacpi_size cur_index;

    UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_TABLES_LOADED);

    ret = uacpi_table_find_by_signature(ACPI_DSDT_SIGNATURE, &tbl);
    if (uacpi_unlikely_error(ret)) {
        uacpi_error("unable to find DSDT: %s\n", uacpi_status_to_string(ret));
        return ret;
    }

    ret = uacpi_table_load_with_cause(tbl.index, UACPI_TABLE_LOAD_CAUSE_INIT);
    if (uacpi_unlikely_error(ret)) {
        trace_table_load_failure(tbl.hdr, UACPI_LOG_ERROR, ret);
        st.failure_counter++;
    }
    st.load_counter++;

    for (cur_index = 0;; cur_index = tbl.index + 1) {
        ret = uacpi_table_match(cur_index, match_ssdt_or_psdt, &tbl);
        if (ret != UACPI_STATUS_OK) {
            if (uacpi_unlikely(ret != UACPI_STATUS_NOT_FOUND))
                return ret;

            break;
        }

        ret = uacpi_table_load_with_cause(tbl.index, UACPI_TABLE_LOAD_CAUSE_INIT);
        if (uacpi_unlikely_error(ret)) {
            trace_table_load_failure(tbl.hdr, UACPI_LOG_WARN, ret);
            st.failure_counter++;
        }
        st.load_counter++;
    }

    if (uacpi_unlikely(st.failure_counter != 0)) {
        uacpi_info(
            "loaded & executed %u AML blob%s (%u error%s)\n", st.load_counter,
            st.load_counter > 1 ? "s" : "", st.failure_counter,
            st.failure_counter > 1 ? "s" : ""
        );
    } else {
        uacpi_info(
            "successfully loaded & executed %u AML blob%s\n", st.load_counter,
            st.load_counter > 1 ? "s" : ""
        );
    }

    ret = uacpi_initialize_events();
    if (uacpi_unlikely_error(ret)) {
        uacpi_warn("event initialization failed: %s\n",
                   uacpi_status_to_string(ret));
    }

    g_uacpi_rt_ctx.init_level = UACPI_INIT_LEVEL_NAMESPACE_LOADED;
    return ret;
}

struct ns_init_context {
    uacpi_size ini_executed;
    uacpi_size ini_errors;
    uacpi_size sta_executed;
    uacpi_size sta_errors;
    uacpi_size devices;
    uacpi_size thermal_zones;
    uacpi_size processors;
};

static void ini_eval(struct ns_init_context *ctx, uacpi_namespace_node *node)
{
    uacpi_status ret;

    ret = uacpi_eval(node, "_INI", UACPI_NULL, UACPI_NULL);
    if (ret == UACPI_STATUS_NOT_FOUND)
        return;

    ctx->ini_executed++;
    if (uacpi_unlikely_error(ret))
        ctx->ini_errors++;
}

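/*
 * Evaluates _STA for the node. A result of 0xFFFFFFFF is the all-bits-set
 * default reported when the node has no _STA method, in which case the call
 * is not counted towards the statistics.
 */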
static uacpi_status sta_eval(
    struct ns_init_context *ctx, uacpi_namespace_node *node,
    uacpi_u32 *value
)
{
    uacpi_status ret;

    ret = uacpi_eval_sta(node, value);
    if (*value == 0xFFFFFFFF)
        return ret;

    ctx->sta_executed++;
    if (uacpi_unlikely_error(ret))
        ctx->sta_errors++;

    return ret;
}

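/*
 * Per-node callback for the depth-first _STA/_INI pass: aliases are skipped,
 * devices/thermal zones/processors are counted, _STA is evaluated, and _INI
 * is only run for nodes that report themselves as present. Subtrees of
 * devices that are neither present nor functional are pruned.
 */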
static enum uacpi_ns_iteration_decision do_sta_ini(
    void *opaque, uacpi_namespace_node *node
)
{
    struct ns_init_context *ctx = opaque;
    uacpi_status ret;
    uacpi_u32 sta_ret;
    uacpi_bool is_sb;
    uacpi_object *obj;

    // We don't care about aliases
    if (node->flags & UACPI_NAMESPACE_NODE_FLAG_ALIAS)
        return UACPI_NS_ITERATION_DECISION_NEXT_PEER;

    is_sb = node == uacpi_namespace_get_predefined(
        UACPI_PREDEFINED_NAMESPACE_SB
    );

    obj = uacpi_namespace_node_get_object(node);
    if (node != uacpi_namespace_root() && !is_sb) {
        switch (obj->type) {
        case UACPI_OBJECT_DEVICE:
            ctx->devices++;
            break;
        case UACPI_OBJECT_THERMAL_ZONE:
            ctx->thermal_zones++;
            break;
        case UACPI_OBJECT_PROCESSOR:
            ctx->processors++;
            break;
        default:
            return UACPI_NS_ITERATION_DECISION_CONTINUE;
        }
    }

    ret = sta_eval(ctx, node, &sta_ret);
    if (uacpi_unlikely_error(ret))
        return UACPI_NS_ITERATION_DECISION_CONTINUE;

    if (!(sta_ret & ACPI_STA_RESULT_DEVICE_PRESENT)) {
        if (!(sta_ret & ACPI_STA_RESULT_DEVICE_FUNCTIONING))
            return UACPI_NS_ITERATION_DECISION_NEXT_PEER;

        /*
         * ACPI 6.5 specification:
         * _STA may return bit 0 clear (not present) with bit [3] set (device
         * is functional). This case is used to indicate a valid device for
         * which no device driver should be loaded (for example, a bridge
         * device.) Children of this device may be present and valid. OSPM
         * should continue enumeration below a device whose _STA returns this
         * bit combination.
         */
        return UACPI_NS_ITERATION_DECISION_CONTINUE;
    }

    if (node != uacpi_namespace_root() && !is_sb)
        ini_eval(ctx, node);

    return UACPI_NS_ITERATION_DECISION_CONTINUE;
}

uacpi_status uacpi_namespace_initialize(void)
{
    struct ns_init_context ctx = { 0 };
    uacpi_namespace_node *root;
    uacpi_address_space_handlers *handlers;
    uacpi_address_space_handler *handler;

    UACPI_ENSURE_INIT_LEVEL_IS(UACPI_INIT_LEVEL_NAMESPACE_LOADED);

    /*
     * Initialization order here is identical to ACPICA because the ACPI
     * specification doesn't really have any detailed steps that explain
     * how to do it.
     */

    root = uacpi_namespace_root();

    // Step 1 - Execute \_INI
    ini_eval(&ctx, root);

    // Step 2 - Execute \_SB._INI
    ini_eval(
        &ctx, uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_SB)
    );

    /*
     * Step 3 - Run _REG methods for all globally installed
     *          address space handlers.
     */
    handlers = uacpi_node_get_address_space_handlers(root);
    if (handlers) {
        handler = handlers->head;

        while (handler) {
            uacpi_reg_all_opregions(root, handler->space);
            handler = handler->next;
        }
    }

    // Step 4 - Run all other _STA and _INI methods
    uacpi_namespace_for_each_node_depth_first(root, do_sta_ini, &ctx);

    uacpi_info(
        "namespace initialization done: "
        "%zu devices, %zu thermal zones, %zu processors\n",
        ctx.devices, ctx.thermal_zones, ctx.processors
    );

    uacpi_trace(
        "_STA calls: %zu (%zu errors), _INI calls: %zu (%zu errors)\n",
        ctx.sta_executed, ctx.sta_errors, ctx.ini_executed,
        ctx.ini_errors
    );

    g_uacpi_rt_ctx.init_level = UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED;
    return UACPI_STATUS_OK;
}

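/*
 * Resolves 'path' relative to 'parent' (one of them may be NULL, but not
 * both). Non-method objects are returned directly with an extra reference
 * taken on behalf of the caller; methods are executed with the provided
 * arguments.
 */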
uacpi_status
uacpi_eval(uacpi_namespace_node *parent, const uacpi_char *path,
           const uacpi_args *args, uacpi_object **ret)
{
    struct uacpi_namespace_node *node;
    uacpi_object *obj;

    if (parent == UACPI_NULL && path == UACPI_NULL)
        return UACPI_STATUS_INVALID_ARGUMENT;

    if (path != UACPI_NULL) {
        node = uacpi_namespace_node_find(parent, path);
        if (node == UACPI_NULL)
            return UACPI_STATUS_NOT_FOUND;
    } else {
        node = parent;
    }

    obj = uacpi_namespace_node_get_object(node);
    if (obj->type != UACPI_OBJECT_METHOD) {
        if (uacpi_likely(ret != UACPI_NULL)) {
            *ret = obj;
            uacpi_object_ref(obj);
        }

        return UACPI_STATUS_OK;
    }

    return uacpi_execute_control_method(node, obj->method, args, ret);
}

#define TRACE_BAD_RET(path_fmt, type, ...)                                 \
    uacpi_warn(                                                            \
        "unexpected '%s' object returned by method "path_fmt               \
        ", expected type mask: %08X\n", uacpi_object_type_to_string(type), \
        __VA_ARGS__                                                        \
    )

#define TRACE_NO_RET(path_fmt, ...)                                        \
    uacpi_warn(                                                            \
        "no value returned from method "path_fmt", expected type mask: "   \
        "%08X\n", __VA_ARGS__                                              \
    )

static void trace_invalid_return_type(
    uacpi_namespace_node *parent, const uacpi_char *path,
    uacpi_u32 expected_mask, uacpi_object_type actual_type
)
{
    const uacpi_char *abs_path;
    uacpi_bool dynamic_abs_path = UACPI_FALSE;

    if (parent == UACPI_NULL || (path != UACPI_NULL && path[0] == '\\')) {
        abs_path = path;
    } else {
        abs_path = uacpi_namespace_node_generate_absolute_path(parent);
        dynamic_abs_path = UACPI_TRUE;
    }

    if (dynamic_abs_path && path != UACPI_NULL) {
        if (actual_type == UACPI_OBJECT_UNINITIALIZED)
            TRACE_NO_RET("%s.%s", abs_path, path, expected_mask);
        else
            TRACE_BAD_RET("%s.%s", actual_type, abs_path, path, expected_mask);
    } else {
        if (actual_type == UACPI_OBJECT_UNINITIALIZED) {
            TRACE_NO_RET("%s", abs_path, expected_mask);
        } else {
            TRACE_BAD_RET("%s", actual_type, abs_path, expected_mask);
        }
    }

    if (dynamic_abs_path)
        uacpi_free_dynamic_string(abs_path);
}

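/*
 * Like uacpi_eval(), but validates the type of the returned object against
 * 'ret_mask', a bitmask of UACPI_OBJECT_*_BIT values. A zero mask accepts
 * any (or no) return value.
 */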
uacpi_status uacpi_eval_typed(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_args *args, uacpi_u32 ret_mask, uacpi_object **out_obj
)
{
    uacpi_status ret;
    uacpi_object *obj;
    uacpi_object_type returned_type = UACPI_OBJECT_UNINITIALIZED;

    if (uacpi_unlikely(out_obj == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;

    ret = uacpi_eval(parent, path, args, &obj);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (obj != UACPI_NULL)
        returned_type = obj->type;

    if (ret_mask && (ret_mask & (1 << returned_type)) == 0) {
        trace_invalid_return_type(parent, path, ret_mask, returned_type);
        uacpi_object_unref(obj);
        return UACPI_STATUS_TYPE_MISMATCH;
    }

    *out_obj = obj;
    return UACPI_STATUS_OK;
}

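/*
 * Usage sketch (the device node and method name below are illustrative, not
 * taken from this file):
 *
 *     uacpi_u64 value;
 *     uacpi_status st = uacpi_eval_integer(device_node, "_ADR",
 *                                          UACPI_NULL, &value);
 *     if (st == UACPI_STATUS_OK)
 *         ; // use 'value' here
 */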
uacpi_status uacpi_eval_integer(
    uacpi_namespace_node *parent, const uacpi_char *path,
    const uacpi_args *args, uacpi_u64 *out_value
)
{
    uacpi_object *int_obj;
    uacpi_status ret;

    ret = uacpi_eval_typed(
        parent, path, args, UACPI_OBJECT_INTEGER_BIT, &int_obj
    );
    if (uacpi_unlikely_error(ret))
        return ret;

    *out_value = int_obj->integer;
    uacpi_object_unref(int_obj);

    return UACPI_STATUS_OK;
}