1#include <uacpi/internal/types.h>
2#include <uacpi/internal/interpreter.h>
3#include <uacpi/internal/dynamic_array.h>
4#include <uacpi/internal/opcodes.h>
5#include <uacpi/internal/namespace.h>
6#include <uacpi/internal/stdlib.h>
7#include <uacpi/internal/context.h>
8#include <uacpi/internal/shareable.h>
9#include <uacpi/internal/tables.h>
10#include <uacpi/internal/helpers.h>
11#include <uacpi/kernel_api.h>
12#include <uacpi/internal/utilities.h>
13#include <uacpi/internal/opregion.h>
14#include <uacpi/internal/io.h>
15#include <uacpi/internal/notify.h>
16#include <uacpi/internal/resources.h>
17#include <uacpi/internal/event.h>
18#include <uacpi/internal/mutex.h>
19#include <uacpi/internal/osi.h>
20
/*
 * Kind tag for the value stored in a struct item slot while an opcode's
 * operands are being gathered.
 */
enum item_type {
    ITEM_NONE = 0,
    // A namespace node pointer
    ITEM_NAMESPACE_NODE,
    // A namespace node created inside a method body (method-local scope)
    ITEM_NAMESPACE_NODE_METHOD_LOCAL,
    // A live uacpi_object reference
    ITEM_OBJECT,
    // Slot reserved for an object that hasn't been created yet
    ITEM_EMPTY_OBJECT,
    // A decoded PkgLength (begin/end AML offsets)
    ITEM_PACKAGE_LENGTH,
    // A raw immediate value read from the instruction stream
    ITEM_IMMEDIATE,
};
30
// Byte offsets delimiting a PkgLength-encoded region of the AML stream.
struct package_length {
    uacpi_u32 begin;
    uacpi_u32 end;
};
35
/*
 * One operand slot; `type` (an enum item_type) selects which union member
 * is active.
 */
struct item {
    uacpi_u8 type;
    union {
        uacpi_handle handle;
        uacpi_object *obj;
        struct uacpi_namespace_node *node;
        struct package_length pkg;
        uacpi_u64 immediate;
        // Byte-wise view of `immediate` for partial reads/writes
        uacpi_u8 immediate_bytes[8];
    };
};
47
// Operand storage for the op currently being decoded; 8 inline slots
DYNAMIC_ARRAY_WITH_INLINE_STORAGE(item_array, struct item, 8)
DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(item_array, struct item, static)
50
/*
 * Decode state for one in-flight opcode: where we are in its operand list
 * (pc), whether decoding was preempted, and the gathered operand items.
 */
struct op_context {
    uacpi_u8 pc;
    uacpi_bool preempted;

    /*
     * == 0 -> none
     * >= 1 -> item[idx - 1]
     */
    uacpi_u8 tracked_pkg_idx;

    const struct uacpi_op_spec *op;
    struct item_array items;
};
64
// Stack of nested op contexts per call frame; 8 inline slots
DYNAMIC_ARRAY_WITH_INLINE_STORAGE(op_context_array, struct op_context, 8)
DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(
    op_context_array, struct op_context, static
)
69
70static struct op_context *op_context_array_one_before_last(
71 struct op_context_array *arr
72)
73{
74 uacpi_size size;
75
76 size = op_context_array_size(arr);
77
78 if (size < 2)
79 return UACPI_NULL;
80
81 return op_context_array_at(arr, idx: size - 2);
82}
83
// Kind of AML control-flow block currently being tracked.
enum code_block_type {
    CODE_BLOCK_IF = 1,
    CODE_BLOCK_ELSE = 2,
    CODE_BLOCK_WHILE = 3,
    CODE_BLOCK_SCOPE = 4,
};
90
/*
 * One active control-flow region, delimited by [begin, end) AML offsets.
 * The union member depends on the block type: `node` for scopes,
 * `expiration_point` for While blocks (presumably a loop timeout
 * deadline — confirm against the While handler).
 */
struct code_block {
    enum code_block_type type;
    uacpi_u32 begin, end;
    union {
        struct uacpi_namespace_node *node;
        uacpi_u64 expiration_point;
    };
};
99
// Stack of nested control-flow blocks per call frame; 8 inline slots
DYNAMIC_ARRAY_WITH_INLINE_STORAGE(code_block_array, struct code_block, 8)
DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(
    code_block_array, struct code_block, static
)
104
// Mutexes currently held by the interpreter, in acquisition order
DYNAMIC_ARRAY_WITH_INLINE_STORAGE(held_mutexes_array, uacpi_mutex*, 8)
DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(
    held_mutexes_array, uacpi_mutex*, static
)
109
110static uacpi_status held_mutexes_array_push(
111 struct held_mutexes_array *arr, uacpi_mutex *mutex
112)
113{
114 uacpi_mutex **slot;
115
116 slot = held_mutexes_array_alloc(arr);
117 if (uacpi_unlikely(slot == UACPI_NULL))
118 return UACPI_STATUS_OUT_OF_MEMORY;
119
120 *slot = mutex;
121 uacpi_shareable_ref(mutex);
122 return UACPI_STATUS_OK;
123}
124
/*
 * Remove the entry at index i, shifting every later entry back by one.
 * The array keeps its first N entries in inline storage and the overflow
 * in a separately allocated dynamic buffer, so the shift must be stitched
 * across the two regions.
 */
static void held_mutexes_array_remove_idx(
    struct held_mutexes_array *arr, uacpi_size i
)
{
    uacpi_size size;

    size = held_mutexes_array_inline_capacity(arr);

    // Only the dynamic array part is affected
    if (i >= size) {
        // Rebase the index and remaining-count onto the dynamic buffer
        i -= size;
        size = arr->size_including_inline - size;
        size -= i + 1;

        uacpi_memmove(
            &arr->dynamic_storage[i], &arr->dynamic_storage[i + 1],
            size * sizeof(arr->inline_storage[0])
        );

        held_mutexes_array_pop(arr);
        return;
    }

    // Shift the inline entries that follow i back by one slot
    size = UACPI_MIN(held_mutexes_array_inline_capacity(arr),
                     arr->size_including_inline);
    size -= i + 1;
    uacpi_memmove(
        &arr->inline_storage[i], &arr->inline_storage[i + 1],
        size * sizeof(arr->inline_storage[0])
    );

    size = held_mutexes_array_size(arr);
    i = held_mutexes_array_inline_capacity(arr);

    /*
     * This array has dynamic storage as well, now we have to take the first
     * dynamic item, move it to the top of inline storage, and then shift all
     * dynamic items backward by 1 as well.
     */
    if (size > i) {
        arr->inline_storage[i - 1] = arr->dynamic_storage[0];
        // size - i - 1 dynamic entries remain to be shifted
        size -= i + 1;

        uacpi_memmove(
            &arr->dynamic_storage[0], &arr->dynamic_storage[1],
            size * sizeof(arr->inline_storage[0])
        );
    }

    held_mutexes_array_pop(arr);
}
176
// Whether a mutex release should bypass sync-level and depth checks.
enum force_release {
    FORCE_RELEASE_NO,
    FORCE_RELEASE_YES,
};
181
/*
 * Release `mutex` and drop it from the held-mutexes stack.
 *
 * Enforces the ACPI rule that mutexes must be released at the sync level
 * they were acquired at (checked against the most recently acquired one),
 * unless `force` is FORCE_RELEASE_YES. A recursive acquisition (depth > 1)
 * is only unwound by one level unless forced.
 */
static uacpi_status held_mutexes_array_remove_and_release(
    struct held_mutexes_array *arr, uacpi_mutex *mutex,
    enum force_release force
)
{
    uacpi_mutex *item;
    uacpi_size i;

    if (uacpi_unlikely(held_mutexes_array_size(arr) == 0))
        return UACPI_STATUS_INVALID_ARGUMENT;

    item = *held_mutexes_array_last(arr);

    if (uacpi_unlikely(item->sync_level != mutex->sync_level &&
                       force != FORCE_RELEASE_YES)) {
        uacpi_warn(
            "ignoring mutex @%p release due to sync level mismatch: %d vs %d\n",
            mutex, mutex->sync_level, item->sync_level
        );

        // We still return OK because we don't want to abort because of this
        return UACPI_STATUS_OK;
    }

    // Recursive acquisition: just drop one level, keep it on the stack
    if (mutex->depth > 1 && force == FORCE_RELEASE_NO) {
        uacpi_release_aml_mutex(mutex);
        return UACPI_STATUS_OK;
    }

    // Fast path for well-behaved AML that releases mutexes in descending order
    if (uacpi_likely(item == mutex)) {
        held_mutexes_array_pop(arr);
        goto do_release;
    }

    /*
     * The mutex being released is not the last one acquired, although we did
     * verify that at least it has the same sync level. Anyway, now we have
     * to search for it and then remove it from the array while shifting
     * everything backwards.
     */
    i = held_mutexes_array_size(arr);
    for (;;) {
        item = *held_mutexes_array_at(arr, --i);
        if (item == mutex)
            break;

        // Searched the entire stack without finding it
        if (uacpi_unlikely(i == 0))
            return UACPI_STATUS_INVALID_ARGUMENT;
    }

    held_mutexes_array_remove_idx(arr, i);

do_release:
    // This is either a force release, or depth was already 1 to begin with
    mutex->depth = 1;
    uacpi_release_aml_mutex(mutex);

    uacpi_mutex_unref(mutex);
    return UACPI_STATUS_OK;
}
243
// Nodes created by a method that must be removed once it returns
DYNAMIC_ARRAY_WITH_INLINE_STORAGE(
    temp_namespace_node_array, uacpi_namespace_node*, 8)
DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(
    temp_namespace_node_array, uacpi_namespace_node*, static
)
249
250static uacpi_status temp_namespace_node_array_push(
251 struct temp_namespace_node_array *arr, uacpi_namespace_node *node
252)
253{
254 uacpi_namespace_node **slot;
255
256 slot = temp_namespace_node_array_alloc(arr);
257 if (uacpi_unlikely(slot == UACPI_NULL))
258 return UACPI_STATUS_OUT_OF_MEMORY;
259
260 *slot = node;
261 return UACPI_STATUS_OK;
262}
263
/*
 * Execution state of one control method invocation: its arguments and
 * locals, pending opcode/block stacks, and the current position in the
 * method's bytecode.
 */
struct call_frame {
    struct uacpi_control_method *method;

    // AML methods take at most 7 args (Arg0..Arg6) and have 8 locals
    uacpi_object *args[7];
    uacpi_object *locals[8];

    struct op_context_array pending_ops;
    struct code_block_array code_blocks;
    struct temp_namespace_node_array temp_nodes;
    struct code_block *last_while;
    uacpi_u64 prev_while_expiration;
    uacpi_u32 prev_while_code_offset;

    // Byte offset of the next instruction within method->code
    uacpi_u32 code_offset;

    struct uacpi_namespace_node *cur_scope;

    // Only used if the method is serialized
    uacpi_u8 prev_sync_level;
};
284
285static void *call_frame_cursor(struct call_frame *frame)
286{
287 return frame->method->code + frame->code_offset;
288}
289
290static uacpi_size call_frame_code_bytes_left(struct call_frame *frame)
291{
292 return frame->method->size - frame->code_offset;
293}
294
295static uacpi_bool call_frame_has_code(struct call_frame *frame)
296{
297 return call_frame_code_bytes_left(frame) > 0;
298}
299
// The interpreter call stack; 4 frames inline before heap allocation
DYNAMIC_ARRAY_WITH_INLINE_STORAGE(call_frame_array, struct call_frame, 4)
DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(
    call_frame_array, struct call_frame, static
)
304
305static struct call_frame *call_frame_array_one_before_last(
306 struct call_frame_array *arr
307)
308{
309 uacpi_size size;
310
311 size = call_frame_array_size(arr);
312
313 if (size < 2)
314 return UACPI_NULL;
315
316 return call_frame_array_at(arr, idx: size - 2);
317}
318
// NOTE: Try to keep size under 2 pages
/*
 * Top-level interpreter state for one method invocation tree: the call
 * stack, held mutexes, and cached pointers into the current frame's
 * decode state.
 */
struct execution_context {
    uacpi_object *ret;
    struct call_frame_array call_stack;
    struct held_mutexes_array held_mutexes;

    struct call_frame *cur_frame;
    struct code_block *cur_block;
    const struct uacpi_op_spec *cur_op;
    struct op_context *prev_op_ctx;
    struct op_context *cur_op_ctx;

    // Set after an If block's predicate was true, so its Else is skipped
    uacpi_bool skip_else;
    uacpi_u8 sync_level;
};
334
/*
 * Read one byte of AML at ptr + offset.
 * Fixed to actually expand the `ptr` parameter (it previously hard-coded
 * the identifier `code`, working only because every call site passes a
 * variable of that name) and to parenthesize `offset`.
 */
#define AML_READ(ptr, offset) (*(((uacpi_u8*)(ptr)) + (offset)))
336
337static uacpi_status parse_nameseg(uacpi_u8 *cursor,
338 uacpi_object_name *out_name)
339{
340 if (uacpi_unlikely(!uacpi_is_valid_nameseg(cursor)))
341 return UACPI_STATUS_AML_INVALID_NAMESTRING;
342
343 uacpi_memcpy(dest: &out_name->id, src: cursor, count: 4);
344 return UACPI_STATUS_OK;
345}
346
347/*
348 * -------------------------------------------------------------
349 * RootChar := ‘\’
350 * ParentPrefixChar := ‘^’
351 * ‘\’ := 0x5C
352 * ‘^’ := 0x5E
353 * ------------------------------------------------------------
354 * NameSeg := <leadnamechar namechar namechar namechar>
355 * NameString := <rootchar namepath> | <prefixpath namepath>
356 * PrefixPath := Nothing | <’^’ prefixpath>
357 * NamePath := NameSeg | DualNamePath | MultiNamePath | NullName
358 * DualNamePath := DualNamePrefix NameSeg NameSeg
359 * MultiNamePath := MultiNamePrefix SegCount NameSeg(SegCount)
360 */
361
/*
 * Convert the encoded AML NameString at method offset `offset` into a
 * freshly allocated, human-readable path string ("^^FOO.BAR" style) with
 * namesegs separated by dots. On success the caller owns *out_string;
 * *out_size includes the null terminator.
 */
static uacpi_status name_string_to_path(
    struct call_frame *frame, uacpi_size offset,
    uacpi_char **out_string, uacpi_size *out_size
)
{
    uacpi_size bytes_left, prefix_bytes, nameseg_bytes = 0, namesegs;
    uacpi_char *base_cursor, *cursor;
    uacpi_char prev_char;

    bytes_left = frame->method->size - offset;
    cursor = (uacpi_char*)frame->method->code + offset;
    base_cursor = cursor;
    namesegs = 0;

    // Count the leading RootChar / ParentPrefixChar run
    prefix_bytes = 0;
    for (;;) {
        if (uacpi_unlikely(bytes_left == 0))
            return UACPI_STATUS_AML_INVALID_NAMESTRING;

        prev_char = *cursor;

        switch (prev_char) {
        case '^':
        case '\\':
            prefix_bytes++;
            cursor++;
            bytes_left--;
            break;
        default:
            break;
        }

        // Only '^' may repeat; anything else ends the prefix
        if (prev_char != '^')
            break;
    }

    // At least a NullName byte is expected here
    if (uacpi_unlikely(bytes_left == 0))
        return UACPI_STATUS_AML_INVALID_NAMESTRING;

    // Decode the NamePath shape to learn how many namesegs follow
    namesegs = 0;
    bytes_left--;
    switch (*cursor++)
    {
    case UACPI_DUAL_NAME_PREFIX:
        namesegs = 2;
        break;
    case UACPI_MULTI_NAME_PREFIX:
        if (uacpi_unlikely(bytes_left == 0))
            return UACPI_STATUS_AML_INVALID_NAMESTRING;

        namesegs = *(uacpi_u8*)cursor;
        if (uacpi_unlikely(namesegs == 0)) {
            uacpi_error("MultiNamePrefix but SegCount is 0\n");
            return UACPI_STATUS_AML_INVALID_NAMESTRING;
        }

        cursor++;
        bytes_left--;
        break;
    case UACPI_NULL_NAME:
        break;
    default:
        /*
         * Might be an invalid byte, but assume single nameseg for now,
         * the code below will validate it for us.
         */
        cursor--;
        bytes_left++;
        namesegs = 1;
        break;
    }

    if (uacpi_unlikely((namesegs * 4) > bytes_left))
        return UACPI_STATUS_AML_INVALID_NAMESTRING;

    if (namesegs) {
        // 4 chars per nameseg
        nameseg_bytes = namesegs * 4;

        // dot separator for every nameseg
        nameseg_bytes += namesegs - 1;
    }

    // +1 for the null terminator
    *out_size = nameseg_bytes + prefix_bytes + 1;

    *out_string = uacpi_kernel_alloc(*out_size);
    if (*out_string == UACPI_NULL)
        return UACPI_STATUS_OUT_OF_MEMORY;

    // Prefix characters are copied verbatim
    uacpi_memcpy(*out_string, base_cursor, prefix_bytes);

    base_cursor = *out_string;
    base_cursor += prefix_bytes;

    // Copy each nameseg, dot-separating all but the last
    while (namesegs-- > 0) {
        uacpi_memcpy(base_cursor, cursor, 4);
        cursor += 4;
        base_cursor += 4;

        if (namesegs)
            *base_cursor++ = '.';
    }

    *base_cursor = '\0';
    return UACPI_STATUS_OK;
}
469
// How resolve_name_string treats the final nameseg of the path.
enum resolve_behavior {
    // Create the last nameseg's node; error if it already exists
    RESOLVE_CREATE_LAST_NAMESEG_FAIL_IF_EXISTS,
    // Pure lookup; error if the path doesn't resolve
    RESOLVE_FAIL_IF_DOESNT_EXIST,
};
474
475static uacpi_status resolve_name_string(
476 struct call_frame *frame,
477 enum resolve_behavior behavior,
478 struct uacpi_namespace_node **out_node
479)
480{
481 uacpi_status ret = UACPI_STATUS_OK;
482 uacpi_u8 *cursor;
483 uacpi_size bytes_left, namesegs = 0;
484 struct uacpi_namespace_node *parent, *cur_node = frame->cur_scope;
485 uacpi_char prev_char = 0;
486 uacpi_bool just_one_nameseg = UACPI_TRUE;
487
488 bytes_left = call_frame_code_bytes_left(frame);
489 cursor = call_frame_cursor(frame);
490
491 for (;;) {
492 if (uacpi_unlikely(bytes_left == 0))
493 return UACPI_STATUS_AML_INVALID_NAMESTRING;
494
495 switch (*cursor) {
496 case '\\':
497 if (prev_char == '^')
498 return UACPI_STATUS_AML_INVALID_NAMESTRING;
499
500 cur_node = uacpi_namespace_root();
501 break;
502 case '^':
503 // Tried to go behind root
504 if (uacpi_unlikely(cur_node == uacpi_namespace_root()))
505 return UACPI_STATUS_AML_INVALID_NAMESTRING;
506
507 cur_node = cur_node->parent;
508 break;
509 default:
510 break;
511 }
512
513 prev_char = *cursor;
514
515 switch (prev_char) {
516 case '^':
517 case '\\':
518 just_one_nameseg = UACPI_FALSE;
519 cursor++;
520 bytes_left--;
521 break;
522 default:
523 break;
524 }
525
526 if (prev_char != '^')
527 break;
528 }
529
530 // At least a NullName byte is expected here
531 if (uacpi_unlikely(bytes_left == 0))
532 return UACPI_STATUS_AML_INVALID_NAMESTRING;
533
534 bytes_left--;
535 switch (*cursor++)
536 {
537 case UACPI_DUAL_NAME_PREFIX:
538 namesegs = 2;
539 just_one_nameseg = UACPI_FALSE;
540 break;
541 case UACPI_MULTI_NAME_PREFIX:
542 if (uacpi_unlikely(bytes_left == 0))
543 return UACPI_STATUS_AML_INVALID_NAMESTRING;
544
545 namesegs = *cursor;
546 if (uacpi_unlikely(namesegs == 0)) {
547 uacpi_error("MultiNamePrefix but SegCount is 0\n");
548 return UACPI_STATUS_AML_INVALID_NAMESTRING;
549 }
550
551 cursor++;
552 bytes_left--;
553 just_one_nameseg = UACPI_FALSE;
554 break;
555 case UACPI_NULL_NAME:
556 if (behavior == RESOLVE_CREATE_LAST_NAMESEG_FAIL_IF_EXISTS ||
557 just_one_nameseg)
558 return UACPI_STATUS_AML_INVALID_NAMESTRING;
559
560 goto out;
561 default:
562 /*
563 * Might be an invalid byte, but assume single nameseg for now,
564 * the code below will validate it for us.
565 */
566 cursor--;
567 bytes_left++;
568 namesegs = 1;
569 break;
570 }
571
572 if (uacpi_unlikely((namesegs * 4) > bytes_left))
573 return UACPI_STATUS_AML_INVALID_NAMESTRING;
574
575 for (; namesegs; cursor += 4, namesegs--) {
576 uacpi_object_name name;
577
578 ret = parse_nameseg(cursor, out_name: &name);
579 if (uacpi_unlikely_error(ret))
580 return ret;
581
582 parent = cur_node;
583 cur_node = uacpi_namespace_node_find_sub_node(parent, name);
584
585 switch (behavior) {
586 case RESOLVE_CREATE_LAST_NAMESEG_FAIL_IF_EXISTS:
587 if (namesegs == 1) {
588 if (cur_node) {
589 cur_node = UACPI_NULL;
590 ret = UACPI_STATUS_AML_OBJECT_ALREADY_EXISTS;
591 goto out;
592 }
593
594 // Create the node and link to parent but don't install YET
595 cur_node = uacpi_namespace_node_alloc(name);
596 cur_node->parent = parent;
597 }
598 break;
599 case RESOLVE_FAIL_IF_DOESNT_EXIST:
600 if (just_one_nameseg) {
601 while (!cur_node && parent != uacpi_namespace_root()) {
602 cur_node = parent;
603 parent = cur_node->parent;
604
605 cur_node = uacpi_namespace_node_find_sub_node(parent, name);
606 }
607 }
608 break;
609 default:
610 return UACPI_STATUS_INVALID_ARGUMENT;
611 }
612
613 if (cur_node == UACPI_NULL) {
614 ret = UACPI_STATUS_NOT_FOUND;
615 break;
616 }
617 }
618
619out:
620 cursor += namesegs * 4;
621 frame->code_offset = cursor - frame->method->code;
622 *out_node = cur_node;
623 return ret;
624}
625
626static uacpi_status do_install_node_item(struct call_frame *frame,
627 struct item *item)
628{
629 uacpi_status ret;
630
631 ret = uacpi_node_install(parent: item->node->parent, node: item->node);
632 if (uacpi_unlikely_error(ret))
633 return ret;
634
635 if (!frame->method->named_objects_persist)
636 ret = temp_namespace_node_array_push(arr: &frame->temp_nodes, node: item->node);
637
638 if (uacpi_likely_success(ret))
639 item->node = UACPI_NULL;
640
641 return ret;
642}
643
/*
 * Fetch and decode the next opcode from the current frame's bytestream,
 * consuming one byte (or two for 0x5B-extended opcodes), and store its
 * spec in ctx->cur_op. Rejects opcodes marked reserved.
 */
static uacpi_status get_op(struct execution_context *ctx)
{
    uacpi_aml_op op;
    struct call_frame *frame = ctx->cur_frame;
    void *code = frame->method->code;
    uacpi_size size = frame->method->size;

    if (uacpi_unlikely(frame->code_offset >= size))
        return UACPI_STATUS_AML_BAD_ENCODING;

    op = AML_READ(code, frame->code_offset++);
    if (op == UACPI_EXT_PREFIX) {
        // Extended opcode: 0x5B followed by a second opcode byte
        if (uacpi_unlikely(frame->code_offset >= size))
            return UACPI_STATUS_AML_BAD_ENCODING;

        op <<= 8;
        op |= AML_READ(code, frame->code_offset++);
    }

    ctx->cur_op = uacpi_get_op_spec(op);
    if (uacpi_unlikely(ctx->cur_op->properties & UACPI_OP_PROPERTY_RESERVED)) {
        uacpi_error(
            "invalid opcode '%s' encountered in bytestream\n",
            ctx->cur_op->name
        );
        return UACPI_STATUS_AML_INVALID_OPCODE;
    }

    return UACPI_STATUS_OK;
}
674
/*
 * DefBuffer: allocate the buffer object's backing storage using the
 * declared size, copy in the initializer bytes that follow in the AML
 * stream, and zero-fill the remainder.
 *
 * Item layout:
 *  [0] -> PkgLength delimiting the buffer body
 *  [1] -> Integer object with the declared buffer size
 *  [2] -> Immediate: AML offset of the initializer bytes
 *  [3] -> The buffer object being constructed
 */
static uacpi_status handle_buffer(struct execution_context *ctx)
{
    struct package_length *pkg;
    uacpi_u8 *src;
    uacpi_object *dst, *declared_size;
    uacpi_u32 buffer_size, init_size, aml_offset;
    struct op_context *op_ctx = ctx->cur_op_ctx;

    aml_offset = item_array_at(&op_ctx->items, 2)->immediate;
    src = ctx->cur_frame->method->code;
    src += aml_offset;

    pkg = &item_array_at(&op_ctx->items, 0)->pkg;
    init_size = pkg->end - aml_offset;

    // TODO: do package bounds checking at parse time
    if (uacpi_unlikely(pkg->end > ctx->cur_frame->method->size))
        return UACPI_STATUS_AML_BAD_ENCODING;

    declared_size = item_array_at(&op_ctx->items, 1)->obj;

    // An absurdly large declared size almost certainly means corrupted AML
    if (uacpi_unlikely(declared_size->integer > 0xE0000000)) {
        uacpi_error(
            "buffer is too large (%"UACPI_PRIu64"), assuming corrupted "
            "bytestream\n", UACPI_FMT64(declared_size->integer)
        );
        return UACPI_STATUS_AML_BAD_ENCODING;
    }

    if (uacpi_unlikely(declared_size->integer == 0)) {
        uacpi_error("attempted to create an empty buffer\n");
        return UACPI_STATUS_AML_BAD_ENCODING;
    }

    buffer_size = declared_size->integer;
    if (uacpi_unlikely(init_size > buffer_size)) {
        uacpi_error(
            "too many buffer initializers: %u (size is %u)\n",
            init_size, buffer_size
        );
        return UACPI_STATUS_AML_BAD_ENCODING;
    }

    dst = item_array_at(&op_ctx->items, 3)->obj;
    dst->buffer->data = uacpi_kernel_alloc(buffer_size);
    if (uacpi_unlikely(dst->buffer->data == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;
    dst->buffer->size = buffer_size;

    // Bytes past the initializers are zero-filled
    uacpi_memcpy_zerout(dst->buffer->data, src, buffer_size, init_size);
    return UACPI_STATUS_OK;
}
727
728uacpi_status handle_string(struct execution_context *ctx)
729{
730 struct call_frame *frame = ctx->cur_frame;
731 uacpi_object *obj;
732
733 uacpi_char *string;
734 uacpi_size length, max_bytes;
735
736 obj = item_array_last(arr: &ctx->cur_op_ctx->items)->obj;
737 string = call_frame_cursor(frame);
738
739 // TODO: sanitize string for valid UTF-8
740 max_bytes = call_frame_code_bytes_left(frame);
741 length = uacpi_strnlen(str: string, max: max_bytes);
742
743 if (uacpi_unlikely((length == max_bytes) || (string[length++] != 0x00)))
744 return UACPI_STATUS_AML_BAD_ENCODING;
745
746 obj->buffer->text = uacpi_kernel_alloc(size: length);
747 if (uacpi_unlikely(obj->buffer->text == UACPI_NULL))
748 return UACPI_STATUS_OUT_OF_MEMORY;
749
750 uacpi_memcpy(dest: obj->buffer->text, src: string, count: length);
751 obj->buffer->size = length;
752 frame->code_offset += length;
753 return UACPI_STATUS_OK;
754}
755
/*
 * DefPackage / DefVarPackage: size the package from its element count,
 * then deep-copy each supplied initializer into it; unsupplied trailing
 * elements are left uninitialized.
 */
static uacpi_status handle_package(struct execution_context *ctx)
{
    struct op_context *op_ctx = ctx->cur_op_ctx;
    uacpi_package *package;
    uacpi_u32 num_elements, num_defined_elements, i;

    /*
     * Layout of items here:
     * [0] -> Package length, not interesting
     * [1] -> Immediate or integer object, depending on PackageOp/VarPackageOp
     * [2..N-2] -> AML pc+Package element pairs
     * [N-1] -> The resulting package object that we're constructing
     */
    package = item_array_last(&op_ctx->items)->obj->package;

    // 1. Detect how many elements we have, do sanity checking
    if (op_ctx->op->code == UACPI_AML_OP_VarPackageOp) {
        uacpi_object *var_num_elements;

        var_num_elements = item_array_at(&op_ctx->items, 1)->obj;
        if (uacpi_unlikely(var_num_elements->integer > 0xE0000000)) {
            uacpi_error(
                "package is too large (%"UACPI_PRIu64"), assuming "
                "corrupted bytestream\n", UACPI_FMT64(var_num_elements->integer)
            );
            return UACPI_STATUS_AML_BAD_ENCODING;
        }
        num_elements = var_num_elements->integer;
    } else {
        num_elements = item_array_at(&op_ctx->items, 1)->immediate;
    }

    // Items [2..N-2] come in pairs, hence (N - 3) / 2 initializers
    num_defined_elements = (item_array_size(&op_ctx->items) - 3) / 2;
    if (uacpi_unlikely(num_defined_elements > num_elements)) {
        uacpi_warn(
            "too many package initializers: %u, truncating to %u\n",
            num_defined_elements, num_elements
        );

        num_defined_elements = num_elements;
    }

    // 2. Create every object in the package, start as uninitialized
    if (uacpi_unlikely(!uacpi_package_fill(package, num_elements)))
        return UACPI_STATUS_OUT_OF_MEMORY;

    // 3. Go through every defined object and copy it into the package
    for (i = 0; i < num_defined_elements; ++i) {
        uacpi_size base_pkg_index;
        uacpi_status ret;
        struct item *item;
        uacpi_object *obj;

        base_pkg_index = (i * 2) + 2;
        item = item_array_at(&op_ctx->items, base_pkg_index + 1);
        obj = item->obj;

        if (obj != UACPI_NULL && obj->type == UACPI_OBJECT_REFERENCE) {
            /*
             * For named objects we don't actually need the object itself, but
             * simply the path to it. Often times objects referenced by the
             * package are not defined until later so it's not possible to
             * resolve them. For uniformity and to follow the behavior of NT,
             * simply convert the name string to a path string object to be
             * resolved later when actually needed.
             */
            if (obj->flags == UACPI_REFERENCE_KIND_NAMED) {
                uacpi_object_unref(obj);
                item->obj = UACPI_NULL;
                obj = UACPI_NULL;
            } else {
                obj = uacpi_unwrap_internal_reference(obj);
            }
        }

        if (obj == UACPI_NULL) {
            uacpi_size length;
            uacpi_char *path;

            obj = uacpi_create_object(UACPI_OBJECT_STRING);
            if (uacpi_unlikely(obj == UACPI_NULL))
                return UACPI_STATUS_OUT_OF_MEMORY;

            /*
             * NOTE(review): if this call fails, `obj` is not stored in the
             * item yet and looks like it is never unref'd — possible leak
             * on this error path; confirm against object lifetime rules.
             */
            ret = name_string_to_path(
                ctx->cur_frame,
                item_array_at(&op_ctx->items, base_pkg_index)->immediate,
                &path, &length
            );
            if (uacpi_unlikely_error(ret))
                return ret;

            obj->flags = UACPI_STRING_KIND_PATH;
            obj->buffer->text = path;
            obj->buffer->size = length;

            item->obj = obj;
            item->type = ITEM_OBJECT;
        }

        ret = uacpi_object_assign(package->objects[i], obj,
                                  UACPI_ASSIGN_BEHAVIOR_DEEP_COPY);
        if (uacpi_unlikely_error(ret))
            return ret;
    }

    return UACPI_STATUS_OK;
}
863
864static uacpi_size field_byte_size(uacpi_object *obj)
865{
866 uacpi_size bit_length;
867
868 if (obj->type == UACPI_OBJECT_BUFFER_FIELD)
869 bit_length = obj->buffer_field.bit_length;
870 else
871 bit_length = obj->field_unit->bit_length;
872
873 return uacpi_round_up_bits_to_bytes(bit_length);
874}
875
876static uacpi_size sizeof_int()
877{
878 return g_uacpi_rt_ctx.is_rev1 ? 4 : 8;
879}
880
// A borrowed pointer + length view over an object's backing storage.
struct object_storage_as_buffer {
    void *ptr;
    uacpi_size len;
};
885
886static uacpi_status get_object_storage(uacpi_object *obj,
887 struct object_storage_as_buffer *out_buf,
888 uacpi_bool include_null)
889{
890 switch (obj->type) {
891 case UACPI_OBJECT_INTEGER:
892 out_buf->len = sizeof_int();
893 out_buf->ptr = &obj->integer;
894 break;
895 case UACPI_OBJECT_STRING:
896 out_buf->len = obj->buffer->size;
897 if (out_buf->len && !include_null)
898 out_buf->len--;
899
900 out_buf->ptr = obj->buffer->text;
901 break;
902 case UACPI_OBJECT_BUFFER:
903 if (obj->buffer->size == 0) {
904 out_buf->len = 0;
905 break;
906 }
907
908 out_buf->len = obj->buffer->size;
909 out_buf->ptr = obj->buffer->data;
910 break;
911 case UACPI_OBJECT_REFERENCE:
912 return UACPI_STATUS_INVALID_ARGUMENT;
913 default:
914 return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
915 }
916
917 return UACPI_STATUS_OK;
918}
919
920static uacpi_u8 *buffer_index_cursor(uacpi_buffer_index *buf_idx)
921{
922 uacpi_u8 *out_cursor;
923
924 out_cursor = buf_idx->buffer->data;
925 out_cursor += buf_idx->idx;
926
927 return out_cursor;
928}
929
930static void write_buffer_index(uacpi_buffer_index *buf_idx,
931 struct object_storage_as_buffer *src_buf)
932{
933 uacpi_memcpy_zerout(dst: buffer_index_cursor(buf_idx), src: src_buf->ptr,
934 dst_size: 1, src_size: src_buf->len);
935}
936
937/*
938 * The word "implicit cast" here is only because it's called that in
939 * the specification. In reality, we just copy one buffer to another
940 * because that's what NT does.
941 */
942static uacpi_status object_assign_with_implicit_cast(uacpi_object *dst,
943 uacpi_object *src)
944{
945 uacpi_status ret;
946 struct object_storage_as_buffer src_buf;
947
948 ret = get_object_storage(obj: src, out_buf: &src_buf, UACPI_FALSE);
949 if (uacpi_unlikely_error(ret))
950 goto out_bad_cast;
951
952 switch (dst->type) {
953 case UACPI_OBJECT_INTEGER:
954 case UACPI_OBJECT_STRING:
955 case UACPI_OBJECT_BUFFER: {
956 struct object_storage_as_buffer dst_buf;
957
958 ret = get_object_storage(obj: dst, out_buf: &dst_buf, UACPI_FALSE);
959 if (uacpi_unlikely_error(ret))
960 goto out_bad_cast;
961
962 uacpi_memcpy_zerout(dst: dst_buf.ptr, src: src_buf.ptr, dst_size: dst_buf.len, src_size: src_buf.len);
963 break;
964 }
965
966 case UACPI_OBJECT_BUFFER_FIELD:
967 uacpi_write_buffer_field(field: &dst->buffer_field, src: src_buf.ptr, size: src_buf.len);
968 break;
969
970 case UACPI_OBJECT_FIELD_UNIT:
971 return uacpi_write_field_unit(
972 field: dst->field_unit, src: src_buf.ptr, size: src_buf.len
973 );
974
975 case UACPI_OBJECT_BUFFER_INDEX:
976 write_buffer_index(buf_idx: &dst->buffer_index, src_buf: &src_buf);
977 break;
978
979 default:
980 ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
981 goto out_bad_cast;
982 }
983
984 return ret;
985
986out_bad_cast:
987 uacpi_error(
988 "attempted to perform an invalid implicit cast (%s -> %s)\n",
989 uacpi_object_type_to_string(src->type),
990 uacpi_object_type_to_string(dst->type)
991 );
992 return ret;
993}
994
// Selects whether handle_arg_or_local operates on an ArgX or a LocalX.
enum argx_or_localx {
    ARGX,
    LOCALX,
};
999
/*
 * ArgX/LocalX opcode: place a reference to the given arg or local slot
 * into the current op's last item. A slot that has never been written is
 * lazily populated with a reference to an uninitialized object.
 */
static uacpi_status handle_arg_or_local(
    struct execution_context *ctx,
    uacpi_size idx, enum argx_or_localx type
)
{
    uacpi_object **src;
    struct item *dst;
    enum uacpi_reference_kind kind;

    if (type == ARGX) {
        src = &ctx->cur_frame->args[idx];
        kind = UACPI_REFERENCE_KIND_ARG;
    } else {
        src = &ctx->cur_frame->locals[idx];
        kind = UACPI_REFERENCE_KIND_LOCAL;
    }

    if (*src == UACPI_NULL) {
        uacpi_object *default_value;

        default_value = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED);
        if (uacpi_unlikely(default_value == UACPI_NULL))
            return UACPI_STATUS_OUT_OF_MEMORY;

        *src = uacpi_create_internal_reference(kind, default_value);
        if (uacpi_unlikely(*src == UACPI_NULL))
            return UACPI_STATUS_OUT_OF_MEMORY;

        // The reference now owns it; drop our creation reference
        uacpi_object_unref(default_value);
    }

    dst = item_array_last(&ctx->cur_op_ctx->items);
    dst->obj = *src;
    dst->type = ITEM_OBJECT;
    uacpi_object_ref(dst->obj);

    return UACPI_STATUS_OK;
}
1038
1039static uacpi_status handle_local(struct execution_context *ctx)
1040{
1041 uacpi_size idx;
1042 struct op_context *op_ctx = ctx->cur_op_ctx;
1043
1044 idx = op_ctx->op->code - UACPI_AML_OP_Local0Op;
1045 return handle_arg_or_local(ctx, idx, type: LOCALX);
1046}
1047
1048static uacpi_status handle_arg(struct execution_context *ctx)
1049{
1050 uacpi_size idx;
1051 struct op_context *op_ctx = ctx->cur_op_ctx;
1052
1053 idx = op_ctx->op->code - UACPI_AML_OP_Arg0Op;
1054 return handle_arg_or_local(ctx, idx, type: ARGX);
1055}
1056
1057static uacpi_status handle_named_object(struct execution_context *ctx)
1058{
1059 struct uacpi_namespace_node *src;
1060 struct item *dst;
1061
1062 src = item_array_at(arr: &ctx->cur_op_ctx->items, idx: 0)->node;
1063 dst = item_array_at(arr: &ctx->cur_op_ctx->items, idx: 1);
1064
1065 dst->obj = src->object;
1066 dst->type = ITEM_OBJECT;
1067 uacpi_object_ref(obj: dst->obj);
1068
1069 return UACPI_STATUS_OK;
1070}
1071
1072static uacpi_status handle_create_alias(struct execution_context *ctx)
1073{
1074 uacpi_namespace_node *src, *dst;
1075
1076 src = item_array_at(arr: &ctx->cur_op_ctx->items, idx: 0)->node;
1077 dst = item_array_at(arr: &ctx->cur_op_ctx->items, idx: 1)->node;
1078
1079 dst->object = src->object;
1080 dst->flags = UACPI_NAMESPACE_NODE_FLAG_ALIAS;
1081 uacpi_object_ref(obj: dst->object);
1082
1083 return UACPI_STATUS_OK;
1084}
1085
/*
 * DefOpRegion: fill in the operation region object (space, offset,
 * length) from the decoded operands, attach it to the node, and attempt
 * to hook up and _REG an already-registered handler for its space.
 *
 * Item layout: [0] node, [1] space immediate, [2] offset, [3] length,
 * [4] the op-region object.
 */
static uacpi_status handle_create_op_region(struct execution_context *ctx)
{
    uacpi_namespace_node *node;
    uacpi_object *obj;
    uacpi_operation_region *op_region;
    uacpi_u64 region_end;

    node = item_array_at(&ctx->cur_op_ctx->items, 0)->node;
    obj = item_array_at(&ctx->cur_op_ctx->items, 4)->obj;
    op_region = obj->op_region;

    op_region->space = item_array_at(&ctx->cur_op_ctx->items, 1)->immediate;
    op_region->offset = item_array_at(&ctx->cur_op_ctx->items, 2)->obj->integer;
    op_region->length = item_array_at(&ctx->cur_op_ctx->items, 3)->obj->integer;
    region_end = op_region->offset + op_region->length;

    if (uacpi_unlikely(op_region->length == 0)) {
        // Don't abort here, as long as it's never accessed we don't care
        uacpi_warn("unusable/empty operation region %.4s\n", node->name.text);
    } else if (uacpi_unlikely(op_region->offset > region_end)) {
        // offset > offset + length means the 64-bit sum wrapped around
        uacpi_error(
            "invalid operation region %.4s bounds: offset=0x%"UACPI_PRIX64
            " length=0x%"UACPI_PRIX64"\n", node->name.text,
            UACPI_FMT64(op_region->offset), UACPI_FMT64(op_region->length)
        );
        return UACPI_STATUS_AML_BAD_ENCODING;
    }

    node->object = uacpi_create_internal_reference(
        UACPI_REFERENCE_KIND_NAMED, obj
    );
    if (uacpi_unlikely(node->object == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    // If a handler for this address space is installed, run _REG now
    if (uacpi_opregion_find_and_install_handler(node) == UACPI_STATUS_OK)
        uacpi_opregion_reg(node);

    return UACPI_STATUS_OK;
}
1125
// Log a malformed table-identifier string argument and fail the opcode.
static uacpi_status table_id_error(
    const uacpi_char *opcode, const uacpi_char *arg,
    uacpi_buffer *str
)
{
    uacpi_error("%s: invalid %s '%s'\n", opcode, arg, str->text);
    return UACPI_STATUS_AML_BAD_ENCODING;
}
1134
// Log a failed table lookup with the full identifier triple.
static void report_table_id_find_error(
    const uacpi_char *opcode, struct uacpi_table_identifiers *id,
    uacpi_status ret
)
{
    uacpi_error(
        "%s: unable to find table '%.4s' (OEM ID '%.6s', "
        "OEM Table ID '%.8s'): %s\n",
        opcode, id->signature.text, id->oemid, id->oem_table_id,
        uacpi_status_to_string(ret)
    );
}
1147
1148static uacpi_status build_table_id(
1149 const uacpi_char *opcode,
1150 struct uacpi_table_identifiers *out_id,
1151 uacpi_buffer *signature, uacpi_buffer *oem_id,
1152 uacpi_buffer *oem_table_id
1153)
1154{
1155 if (uacpi_unlikely(signature->size != (sizeof(uacpi_object_name) + 1)))
1156 return table_id_error(opcode, arg: "SignatureString", str: signature);
1157
1158 uacpi_memcpy(dest: out_id->signature.text, src: signature->text,
1159 count: sizeof(uacpi_object_name));
1160
1161 if (uacpi_unlikely(oem_id->size > (sizeof(out_id->oemid) + 1)))
1162 return table_id_error(opcode, arg: "OemIDString", str: oem_id);
1163
1164 uacpi_memcpy_zerout(
1165 dst: out_id->oemid, src: oem_id->text,
1166 dst_size: sizeof(out_id->oemid), src_size: oem_id->size ? oem_id->size - 1 : 0
1167 );
1168
1169 if (uacpi_unlikely(oem_table_id->size > (sizeof(out_id->oem_table_id) + 1)))
1170 return table_id_error(opcode, arg: "OemTableIDString", str: oem_table_id);
1171
1172 uacpi_memcpy_zerout(
1173 dst: out_id->oem_table_id, src: oem_table_id->text,
1174 dst_size: sizeof(out_id->oem_table_id),
1175 src_size: oem_table_id->size ? oem_table_id->size - 1 : 0
1176 );
1177
1178 return UACPI_STATUS_OK;
1179}
1180
/*
 * DataTableRegion := DataRegionOp NameString TermArg TermArg TermArg
 *
 * Creates an operation region in the TABLE_DATA address space that spans
 * the in-memory copy of the table matching the three string arguments.
 *
 * Item layout:
 *   [0]       namespace node for the new region
 *   [1]..[3]  SignatureString / OemIDString / OemTableIDString buffers
 *   [4]       the operation region object to initialize
 */
static uacpi_status handle_create_data_region(struct execution_context *ctx)
{
    uacpi_status ret;
    struct item_array *items = &ctx->cur_op_ctx->items;
    struct uacpi_table_identifiers table_id;
    uacpi_table table;
    uacpi_namespace_node *node;
    uacpi_object *obj;
    uacpi_operation_region *op_region;

    node = item_array_at(arr: items, idx: 0)->node;

    ret = build_table_id(
        opcode: "DataTableRegion", out_id: &table_id,
        signature: item_array_at(arr: items, idx: 1)->obj->buffer,
        oem_id: item_array_at(arr: items, idx: 2)->obj->buffer,
        oem_table_id: item_array_at(arr: items, idx: 3)->obj->buffer
    );
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_table_find(id: &table_id, out_table: &table);
    if (uacpi_unlikely_error(ret)) {
        report_table_id_find_error(opcode: "DataTableRegion", id: &table_id, ret);
        return ret;
    }

    // The region covers the entire mapped table, header included
    obj = item_array_at(arr: items, idx: 4)->obj;
    op_region = obj->op_region;
    op_region->space = UACPI_ADDRESS_SPACE_TABLE_DATA;
    op_region->offset = table.virt_addr;
    op_region->length = table.hdr->length;

    // Attach the region object to the node via a named internal reference
    node->object = uacpi_create_internal_reference(
        kind: UACPI_REFERENCE_KIND_NAMED, child: obj
    );
    if (uacpi_unlikely(node->object == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    // If a handler for this space is already registered, run _REG now
    if (uacpi_opregion_find_and_install_handler(node) == UACPI_STATUS_OK)
        uacpi_opregion_reg(node);

    return UACPI_STATUS_OK;
}
1225
1226static uacpi_bool is_dynamic_table_load(enum uacpi_table_load_cause cause)
1227{
1228 return cause != UACPI_TABLE_LOAD_CAUSE_INIT;
1229}
1230
1231static void prepare_table_load(
1232 void *ptr, enum uacpi_table_load_cause cause, uacpi_control_method *in_method
1233)
1234{
1235 struct acpi_dsdt *dsdt = ptr;
1236 enum uacpi_log_level log_level = UACPI_LOG_TRACE;
1237 const uacpi_char *log_prefix = "load of";
1238
1239 if (is_dynamic_table_load(cause)) {
1240 log_prefix = cause == UACPI_TABLE_LOAD_CAUSE_HOST ?
1241 "host-invoked load of" : "dynamic load of";
1242 log_level = UACPI_LOG_INFO;
1243 }
1244
1245 uacpi_log_lvl(
1246 log_level, "%s "UACPI_PRI_TBL_HDR"\n",
1247 log_prefix, UACPI_FMT_TBL_HDR(&dsdt->hdr)
1248 );
1249
1250 in_method->code = dsdt->definition_block;
1251 in_method->size = dsdt->hdr.length - sizeof(dsdt->hdr);
1252 in_method->named_objects_persist = UACPI_TRUE;
1253}
1254
1255static uacpi_status do_load_table(
1256 uacpi_namespace_node *parent, struct acpi_sdt_hdr *tbl,
1257 enum uacpi_table_load_cause cause
1258)
1259{
1260 struct uacpi_control_method method = { 0 };
1261 uacpi_status ret;
1262
1263 prepare_table_load(ptr: tbl, cause, in_method: &method);
1264
1265 ret = uacpi_execute_control_method(scope: parent, method: &method, UACPI_NULL, UACPI_NULL);
1266 if (uacpi_unlikely_error(ret))
1267 return ret;
1268
1269 if (is_dynamic_table_load(cause))
1270 ret = uacpi_events_match_post_dynamic_table_load();
1271
1272 return ret;
1273}
1274
1275static uacpi_status handle_load_table(struct execution_context *ctx)
1276{
1277 uacpi_status ret;
1278 struct item_array *items = &ctx->cur_op_ctx->items;
1279 struct uacpi_table_identifiers table_id;
1280 uacpi_table table;
1281 uacpi_buffer *root_path, *param_path;
1282 uacpi_control_method *method;
1283 uacpi_namespace_node *root_node, *param_node = UACPI_NULL;
1284
1285 /*
1286 * If we already have the last true/false object loaded, this is a second
1287 * invocation of this handler. For the second invocation we want to detect
1288 * new AML GPE handlers that might've been loaded, as well as potentially
1289 * remove the target.
1290 */
1291 if (item_array_size(arr: items) == 11) {
1292 /*
1293 * If this load failed, remove the target that was provided via
1294 * ParameterPathString so that it doesn't get stored to.
1295 */
1296 if (uacpi_unlikely(item_array_at(items, 10)->obj->integer == 0)) {
1297 uacpi_object *target;
1298
1299 target = item_array_at(arr: items, idx: 2)->obj;
1300 if (target != UACPI_NULL) {
1301 uacpi_object_unref(obj: target);
1302 item_array_at(arr: items, idx: 2)->obj = UACPI_NULL;
1303 }
1304
1305 return UACPI_STATUS_OK;
1306 }
1307
1308 uacpi_events_match_post_dynamic_table_load();
1309 return UACPI_STATUS_OK;
1310 }
1311
1312 ret = build_table_id(
1313 opcode: "LoadTable", out_id: &table_id,
1314 signature: item_array_at(arr: items, idx: 4)->obj->buffer,
1315 oem_id: item_array_at(arr: items, idx: 5)->obj->buffer,
1316 oem_table_id: item_array_at(arr: items, idx: 6)->obj->buffer
1317 );
1318 if (uacpi_unlikely_error(ret))
1319 return ret;
1320
1321 root_path = item_array_at(arr: items, idx: 7)->obj->buffer;
1322 param_path = item_array_at(arr: items, idx: 8)->obj->buffer;
1323
1324 if (root_path->size > 1) {
1325 root_node = uacpi_namespace_node_resolve_from_aml_namepath(
1326 scope: ctx->cur_frame->cur_scope, path: root_path->text
1327 );
1328 if (uacpi_unlikely(root_node == UACPI_NULL))
1329 return table_id_error(opcode: "LoadTable", arg: "RootPathString", str: root_path);
1330 } else {
1331 root_node = uacpi_namespace_root();
1332 }
1333
1334 item_array_at(arr: items, idx: 0)->node = root_node;
1335
1336 if (param_path->size > 1) {
1337 struct item *param_item;
1338
1339 param_node = uacpi_namespace_node_resolve_from_aml_namepath(
1340 scope: root_node, path: param_path->text
1341 );
1342 if (uacpi_unlikely(param_node == UACPI_NULL)) {
1343 return table_id_error(
1344 opcode: "LoadTable", arg: "ParameterPathString", str: root_path
1345 );
1346 }
1347
1348 param_item = item_array_at(arr: items, idx: 2);
1349 param_item->obj = param_node->object;
1350 uacpi_object_ref(obj: param_item->obj);
1351 param_item->type = ITEM_OBJECT;
1352 }
1353
1354 ret = uacpi_table_find(id: &table_id, out_table: &table);
1355 if (uacpi_unlikely_error(ret)) {
1356 report_table_id_find_error(opcode: "LoadTable", id: &table_id, ret);
1357 return ret;
1358 }
1359 uacpi_table_mark_as_loaded(idx: table.index);
1360
1361 method = item_array_at(arr: items, idx: 1)->obj->method;
1362 prepare_table_load(ptr: table.hdr, cause: UACPI_TABLE_LOAD_CAUSE_LOAD_TABLE_OP, in_method: method);
1363
1364 return UACPI_STATUS_OK;
1365}
1366
1367static uacpi_status handle_load(struct execution_context *ctx)
1368{
1369 uacpi_status ret;
1370 struct item_array *items = &ctx->cur_op_ctx->items;
1371 uacpi_table table;
1372 uacpi_control_method *method;
1373 uacpi_object *src;
1374 struct acpi_sdt_hdr *src_table;
1375 void *table_buffer;
1376 uacpi_size declared_size;
1377 uacpi_bool unmap_src = UACPI_FALSE;
1378
1379 /*
1380 * If we already have the last true/false object loaded, this is a second
1381 * invocation of this handler. For the second invocation we simply want to
1382 * detect new AML GPE handlers that might've been loaded.
1383 * We do this only if table load was successful though.
1384 */
1385 if (item_array_size(arr: items) == 5) {
1386 if (item_array_at(arr: items, idx: 4)->obj->integer != 0)
1387 uacpi_events_match_post_dynamic_table_load();
1388 return UACPI_STATUS_OK;
1389 }
1390
1391 src = item_array_at(arr: items, idx: 2)->obj;
1392
1393 switch (src->type) {
1394 case UACPI_OBJECT_OPERATION_REGION: {
1395 uacpi_operation_region *op_region;
1396
1397 op_region = src->op_region;
1398 if (uacpi_unlikely(
1399 op_region->space != UACPI_ADDRESS_SPACE_SYSTEM_MEMORY
1400 )) {
1401 uacpi_error("Load: operation region is not SystemMemory\n");
1402 goto error_out;
1403 }
1404
1405 if (uacpi_unlikely(op_region->length < sizeof(struct acpi_sdt_hdr))) {
1406 uacpi_error(
1407 "Load: operation region is too small: %"UACPI_PRIu64"\n",
1408 UACPI_FMT64(op_region->length)
1409 );
1410 goto error_out;
1411 }
1412
1413 src_table = uacpi_kernel_map(addr: op_region->offset, len: op_region->length);
1414 if (uacpi_unlikely(src_table == UACPI_NULL)) {
1415 uacpi_error(
1416 "Load: failed to map operation region "
1417 "0x%016"UACPI_PRIX64" -> 0x%016"UACPI_PRIX64"\n",
1418 UACPI_FMT64(op_region->offset),
1419 UACPI_FMT64(op_region->offset + op_region->length)
1420 );
1421 goto error_out;
1422 }
1423
1424 unmap_src = UACPI_TRUE;
1425 declared_size = op_region->length;
1426 break;
1427 }
1428
1429 case UACPI_OBJECT_BUFFER: {
1430 uacpi_buffer *buffer;
1431
1432 buffer = src->buffer;
1433 if (buffer->size < sizeof(struct acpi_sdt_hdr)) {
1434 uacpi_error(
1435 "Load: buffer is too small: %zu\n",
1436 buffer->size
1437 );
1438 goto error_out;
1439 }
1440
1441 src_table = buffer->data;
1442 declared_size = buffer->size;
1443 break;
1444 }
1445
1446 default:
1447 uacpi_error(
1448 "Load: invalid argument '%s', expected "
1449 "Buffer/Field/OperationRegion\n",
1450 uacpi_object_type_to_string(src->type)
1451 );
1452 goto error_out;
1453 }
1454
1455 if (uacpi_unlikely(src_table->length > declared_size)) {
1456 uacpi_error(
1457 "Load: table size %u is larger than the declared size %zu\n",
1458 src_table->length, declared_size
1459 );
1460 goto error_out;
1461 }
1462
1463 if (uacpi_unlikely(src_table->length < sizeof(struct acpi_sdt_hdr))) {
1464 uacpi_error("Load: table size %u is too small\n", src_table->length);
1465 goto error_out;
1466 }
1467
1468 table_buffer = uacpi_kernel_alloc(size: src_table->length);
1469 if (uacpi_unlikely(table_buffer == UACPI_NULL))
1470 goto error_out;
1471
1472 uacpi_memcpy(dest: table_buffer, src: src_table, count: src_table->length);
1473
1474 if (unmap_src) {
1475 uacpi_kernel_unmap(addr: src_table, len: declared_size);
1476 unmap_src = UACPI_FALSE;
1477 }
1478
1479 ret = uacpi_table_install_with_origin(
1480 virt: table_buffer, origin: UACPI_TABLE_ORIGIN_FIRMWARE_VIRTUAL, out_table: &table
1481 );
1482 if (uacpi_unlikely_error(ret)) {
1483 uacpi_free(table_buffer, src_table->length);
1484
1485 if (ret != UACPI_STATUS_OVERRIDEN)
1486 goto error_out;
1487 }
1488 uacpi_table_mark_as_loaded(idx: table.index);
1489
1490 item_array_at(arr: items, idx: 0)->node = uacpi_namespace_root();
1491
1492 method = item_array_at(arr: items, idx: 1)->obj->method;
1493 prepare_table_load(ptr: table.ptr, cause: UACPI_TABLE_LOAD_CAUSE_LOAD_OP, in_method: method);
1494
1495 return UACPI_STATUS_OK;
1496
1497error_out:
1498 if (unmap_src && src_table)
1499 uacpi_kernel_unmap(addr: src_table, len: declared_size);
1500 return UACPI_STATUS_OK;
1501}
1502
1503uacpi_status uacpi_execute_table(void *tbl, enum uacpi_table_load_cause cause)
1504{
1505 return do_load_table(parent: uacpi_namespace_root(), tbl, cause);
1506}
1507
1508uacpi_u32 get_field_length(struct item *item)
1509{
1510 struct package_length *pkg = &item->pkg;
1511 return pkg->end - pkg->begin;
1512}
1513
/*
 * Per-opcode operands gathered up front by handle_create_field().
 * Which members are meaningful depends on the field-creation opcode.
 */
struct field_specific_data {
    uacpi_namespace_node *region; // FieldOp/BankFieldOp: the host operation region
    struct uacpi_field_unit *field0; // BankFieldOp: bank selector; IndexFieldOp: index field
    struct uacpi_field_unit *field1; // IndexFieldOp: data field
    uacpi_u64 value; // BankFieldOp: bank selection value
};
1520
1521static uacpi_status ensure_is_a_field_unit(uacpi_namespace_node *node,
1522 uacpi_field_unit **out_field)
1523{
1524 uacpi_object *obj;
1525
1526 obj = uacpi_namespace_node_get_object(node);
1527 if (obj->type != UACPI_OBJECT_FIELD_UNIT) {
1528 uacpi_error(
1529 "Invalid argument: '%.4s' is not a field unit (%s)\n",
1530 node->name.text, uacpi_object_type_to_string(obj->type)
1531 );
1532 return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
1533 }
1534
1535 *out_field = obj->field_unit;
1536 return UACPI_STATUS_OK;
1537}
1538
1539static uacpi_status ensure_is_an_op_region(uacpi_namespace_node *node,
1540 uacpi_namespace_node **out_node)
1541{
1542 uacpi_object *obj;
1543
1544 obj = uacpi_namespace_node_get_object(node);
1545 if (obj->type != UACPI_OBJECT_OPERATION_REGION) {
1546 uacpi_error(
1547 "Invalid argument: '%.4s' is not an operation region (%s)\n",
1548 node->name.text, uacpi_object_type_to_string(obj->type)
1549 );
1550 return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
1551 }
1552
1553 *out_node = node;
1554 return UACPI_STATUS_OK;
1555}
1556
1557static uacpi_status handle_create_field(struct execution_context *ctx)
1558{
1559 uacpi_status ret;
1560 struct op_context *op_ctx = ctx->cur_op_ctx;
1561 uacpi_namespace_node *node;
1562 uacpi_object *obj, *connection_obj = UACPI_NULL;
1563 struct field_specific_data field_data;
1564 uacpi_size i = 1, bit_offset = 0;
1565
1566 uacpi_u8 raw_value, access_type, lock_rule, update_rule;
1567 uacpi_u8 access_attrib = 0, access_length = 0;
1568
1569 switch (op_ctx->op->code) {
1570 case UACPI_AML_OP_FieldOp:
1571 node = item_array_at(arr: &op_ctx->items, idx: i++)->node;
1572 ret = ensure_is_an_op_region(node, out_node: &field_data.region);
1573 if (uacpi_unlikely_error(ret))
1574 return ret;
1575 break;
1576
1577 case UACPI_AML_OP_BankFieldOp:
1578 node = item_array_at(arr: &op_ctx->items, idx: i++)->node;
1579 ret = ensure_is_an_op_region(node, out_node: &field_data.region);
1580 if (uacpi_unlikely_error(ret))
1581 return ret;
1582
1583 node = item_array_at(arr: &op_ctx->items, idx: i++)->node;
1584 ret = ensure_is_a_field_unit(node, out_field: &field_data.field0);
1585 if (uacpi_unlikely_error(ret))
1586 return ret;
1587
1588 field_data.value = item_array_at(arr: &op_ctx->items, idx: i++)->obj->integer;
1589 break;
1590
1591 case UACPI_AML_OP_IndexFieldOp:
1592 node = item_array_at(arr: &op_ctx->items, idx: i++)->node;
1593 ret = ensure_is_a_field_unit(node, out_field: &field_data.field0);
1594 if (uacpi_unlikely_error(ret))
1595 return ret;
1596
1597 node = item_array_at(arr: &op_ctx->items, idx: i++)->node;
1598 ret = ensure_is_a_field_unit(node, out_field: &field_data.field1);
1599 if (uacpi_unlikely_error(ret))
1600 return ret;
1601 break;
1602
1603 default:
1604 return UACPI_STATUS_INVALID_ARGUMENT;
1605 }
1606
1607 /*
1608 * ByteData
1609 * bit 0-3: AccessType
1610 * 0 AnyAcc
1611 * 1 ByteAcc
1612 * 2 WordAcc
1613 * 3 DWordAcc
1614 * 4 QWordAcc
1615 * 5 BufferAcc
1616 * 6 Reserved
1617 * 7-15 Reserved
1618 * bit 4: LockRule
1619 * 0 NoLock
1620 * 1 Lock
1621 * bit 5-6: UpdateRule
1622 * 0 Preserve
1623 * 1 WriteAsOnes
1624 * 2 WriteAsZeros
1625 * bit 7: Reserved (must be 0)
1626 */
1627 raw_value = item_array_at(arr: &op_ctx->items, idx: i++)->immediate;
1628 access_type = (raw_value >> 0) & 0b1111;
1629 lock_rule = (raw_value >> 4) & 0b1;
1630 update_rule = (raw_value >> 5) & 0b11;
1631
1632 while (i < item_array_size(arr: &op_ctx->items)) {
1633 struct item *item;
1634 item = item_array_at(arr: &op_ctx->items, idx: i++);
1635
1636 // An actual field object
1637 if (item->type == ITEM_NAMESPACE_NODE_METHOD_LOCAL) {
1638 uacpi_u32 length;
1639 uacpi_field_unit *field;
1640
1641 length = get_field_length(item: item_array_at(arr: &op_ctx->items, idx: i++));
1642 node = item->node;
1643
1644 obj = item_array_at(arr: &op_ctx->items, idx: i++)->obj;
1645 field = obj->field_unit;
1646
1647 field->update_rule = update_rule;
1648 field->lock_rule = lock_rule;
1649 field->attributes = access_attrib;
1650 field->access_length = access_length;
1651
1652 /*
1653 * 0 AnyAcc
1654 * 1 ByteAcc
1655 * 2 WordAcc
1656 * 3 DWordAcc
1657 * 4 QWordAcc
1658 * 5 BufferAcc
1659 * 6 Reserved
1660 * 7-15 Reserved
1661 */
1662 switch (access_type) {
1663 case 0:
1664 // TODO: optimize to calculate best access strategy
1665 UACPI_FALLTHROUGH;
1666 case 1:
1667 case 5:
1668 field->access_width_bytes = 1;
1669 break;
1670 case 2:
1671 field->access_width_bytes = 2;
1672 break;
1673 case 3:
1674 field->access_width_bytes = 4;
1675 break;
1676 case 4:
1677 field->access_width_bytes = 8;
1678 break;
1679 default:
1680 uacpi_error("invalid field '%.4s' access type %d\n",
1681 node->name.text, access_type);
1682 return UACPI_STATUS_AML_BAD_ENCODING;
1683 }
1684
1685 field->bit_length = length;
1686
1687 // FIXME: overflow, OOB, etc checks
1688 field->byte_offset = UACPI_ALIGN_DOWN(
1689 bit_offset / 8,
1690 field->access_width_bytes,
1691 uacpi_u32
1692 );
1693
1694 field->bit_offset_within_first_byte = bit_offset;
1695 field->bit_offset_within_first_byte =
1696 bit_offset & ((field->access_width_bytes * 8) - 1);
1697
1698 switch (op_ctx->op->code) {
1699 case UACPI_AML_OP_FieldOp:
1700 field->region = field_data.region;
1701 uacpi_shareable_ref(field->region);
1702
1703 field->kind = UACPI_FIELD_UNIT_KIND_NORMAL;
1704 break;
1705
1706 case UACPI_AML_OP_BankFieldOp:
1707 field->bank_region = field_data.region;
1708 uacpi_shareable_ref(field->bank_region);
1709
1710 field->bank_selection = field_data.field0;
1711 uacpi_shareable_ref(field->bank_selection);
1712
1713 field->bank_value = field_data.value;
1714 field->kind = UACPI_FIELD_UNIT_KIND_BANK;
1715 break;
1716
1717 case UACPI_AML_OP_IndexFieldOp:
1718 field->index = field_data.field0;
1719 uacpi_shareable_ref(field->index);
1720
1721 field->data = field_data.field1;
1722 uacpi_shareable_ref(field->data);
1723
1724 field->kind = UACPI_FIELD_UNIT_KIND_INDEX;
1725 break;
1726
1727 default:
1728 return UACPI_STATUS_INVALID_ARGUMENT;
1729 }
1730
1731 field->connection = connection_obj;
1732 if (field->connection)
1733 uacpi_object_ref(obj: field->connection);
1734
1735 node->object = uacpi_create_internal_reference(
1736 kind: UACPI_REFERENCE_KIND_NAMED, child: obj
1737 );
1738 if (uacpi_unlikely(node->object == UACPI_NULL))
1739 return UACPI_STATUS_OUT_OF_MEMORY;
1740
1741 ret = do_install_node_item(frame: ctx->cur_frame, item);
1742 if (uacpi_unlikely_error(ret))
1743 return ret;
1744
1745 bit_offset += length;
1746 continue;
1747 }
1748
1749 // All other stuff
1750 switch (item->immediate) {
1751 // ReservedField := 0x00 PkgLength
1752 case 0x00:
1753 bit_offset += get_field_length(item: item_array_at(arr: &op_ctx->items, idx: i++));
1754 break;
1755
1756 // AccessField := 0x01 AccessType AccessAttrib
1757 // ExtendedAccessField := 0x03 AccessType ExtendedAccessAttrib AccessLength
1758 case 0x01:
1759 case 0x03:
1760 raw_value = item_array_at(arr: &op_ctx->items, idx: i++)->immediate;
1761
1762 access_type = raw_value & 0b1111;
1763 access_attrib = (raw_value >> 6) & 0b11;
1764
1765 raw_value = item_array_at(arr: &op_ctx->items, idx: i++)->immediate;
1766
1767 /*
1768 * Bits 7:6
1769 * 0 = AccessAttrib = Normal Access Attributes
1770 * 1 = AccessAttrib = AttribBytes (x)
1771 * 2 = AccessAttrib = AttribRawBytes (x)
1772 * 3 = AccessAttrib = AttribRawProcessBytes (x)
1773 * x is encoded as bits 0:7 of the AccessAttrib byte.
1774 */
1775 if (access_attrib) {
1776 switch (access_attrib) {
1777 case 1:
1778 access_attrib = UACPI_ACCESS_ATTRIBUTE_BYTES;
1779 break;
1780 case 2:
1781 access_attrib = UACPI_ACCESS_ATTRIBUTE_RAW_BYTES;
1782 break;
1783 case 3:
1784 access_attrib = UACPI_ACCESS_ATTRIBUTE_RAW_PROCESS_BYTES;
1785 break;
1786 }
1787
1788 access_length = raw_value;
1789 } else { // Normal access attributes
1790 access_attrib = raw_value;
1791 }
1792
1793 if (item->immediate == 3)
1794 access_length = item_array_at(arr: &op_ctx->items, idx: i++)->immediate;
1795 break;
1796
1797 // ConnectField := <0x02 NameString> | <0x02 BufferData>
1798 case 0x02:
1799 connection_obj = item_array_at(arr: &op_ctx->items, idx: i++)->obj;
1800 break;
1801
1802 default:
1803 return UACPI_STATUS_INVALID_ARGUMENT;
1804 }
1805 }
1806
1807 return UACPI_STATUS_OK;
1808}
1809
1810static void truncate_number_if_needed(uacpi_object *obj)
1811{
1812 if (!g_uacpi_rt_ctx.is_rev1)
1813 return;
1814
1815 obj->integer &= 0xFFFFFFFF;
1816}
1817
1818static uacpi_u64 ones()
1819{
1820 return g_uacpi_rt_ctx.is_rev1 ? 0xFFFFFFFF : 0xFFFFFFFFFFFFFFFF;
1821}
1822
1823static uacpi_status method_get_ret_target(struct execution_context *ctx,
1824 uacpi_object **out_operand)
1825{
1826 uacpi_size depth;
1827
1828 // Check if we're targeting the previous call frame
1829 depth = call_frame_array_size(arr: &ctx->call_stack);
1830 if (depth > 1) {
1831 struct op_context *op_ctx;
1832 struct call_frame *frame;
1833
1834 frame = call_frame_array_at(arr: &ctx->call_stack, idx: depth - 2);
1835 depth = op_context_array_size(arr: &frame->pending_ops);
1836
1837 // Ok, no one wants the return value at call site. Discard it.
1838 if (!depth) {
1839 *out_operand = UACPI_NULL;
1840 return UACPI_STATUS_OK;
1841 }
1842
1843 op_ctx = op_context_array_at(arr: &frame->pending_ops, idx: depth - 1);
1844
1845 /*
1846 * Prevent the table being dynamically loaded from attempting to return
1847 * a value to the caller. This is unlikely to be ever encountered in the
1848 * wild, but we should still guard against the possibility.
1849 */
1850 if (uacpi_unlikely(op_ctx->op->code == UACPI_AML_OP_LoadOp ||
1851 op_ctx->op->code == UACPI_AML_OP_LoadTableOp)) {
1852 *out_operand = UACPI_NULL;
1853 return UACPI_STATUS_OK;
1854 }
1855
1856 *out_operand = item_array_last(arr: &op_ctx->items)->obj;
1857 return UACPI_STATUS_OK;
1858 }
1859
1860 return UACPI_STATUS_NOT_FOUND;
1861}
1862
1863static uacpi_status method_get_ret_object(struct execution_context *ctx,
1864 uacpi_object **out_obj)
1865{
1866 uacpi_status ret;
1867
1868 ret = method_get_ret_target(ctx, out_operand: out_obj);
1869 if (ret == UACPI_STATUS_NOT_FOUND) {
1870 *out_obj = ctx->ret;
1871 return UACPI_STATUS_OK;
1872 }
1873 if (ret != UACPI_STATUS_OK || *out_obj == UACPI_NULL)
1874 return ret;
1875
1876 *out_obj = uacpi_unwrap_internal_reference(object: *out_obj);
1877 return UACPI_STATUS_OK;
1878}
1879
1880static struct code_block *find_last_block(struct code_block_array *blocks,
1881 enum code_block_type type)
1882{
1883 uacpi_size i;
1884
1885 i = code_block_array_size(arr: blocks);
1886 while (i-- > 0) {
1887 struct code_block *block;
1888
1889 block = code_block_array_at(arr: blocks, idx: i);
1890 if (block->type == type)
1891 return block;
1892 }
1893
1894 return UACPI_NULL;
1895}
1896
1897static void update_scope(struct call_frame *frame)
1898{
1899 struct code_block *block;
1900
1901 block = find_last_block(blocks: &frame->code_blocks, type: CODE_BLOCK_SCOPE);
1902 if (block == UACPI_NULL) {
1903 frame->cur_scope = uacpi_namespace_root();
1904 return;
1905 }
1906
1907 frame->cur_scope = block->node;
1908}
1909
1910#define TICKS_PER_SECOND (1000ull * 1000ull * 10ull)
1911
1912static uacpi_status begin_block_execution(struct execution_context *ctx)
1913{
1914 struct call_frame *cur_frame = ctx->cur_frame;
1915 struct op_context *op_ctx = ctx->cur_op_ctx;
1916 struct package_length *pkg;
1917 struct code_block *block;
1918
1919 block = code_block_array_alloc(arr: &cur_frame->code_blocks);
1920 if (uacpi_unlikely(block == UACPI_NULL))
1921 return UACPI_STATUS_OUT_OF_MEMORY;
1922
1923 pkg = &item_array_at(arr: &op_ctx->items, idx: 0)->pkg;
1924
1925 switch (op_ctx->op->code) {
1926 case UACPI_AML_OP_IfOp:
1927 block->type = CODE_BLOCK_IF;
1928 break;
1929 case UACPI_AML_OP_ElseOp:
1930 block->type = CODE_BLOCK_ELSE;
1931 break;
1932 case UACPI_AML_OP_WhileOp:
1933 block->type = CODE_BLOCK_WHILE;
1934
1935 if (pkg->begin == cur_frame->prev_while_code_offset) {
1936 uacpi_u64 cur_ticks;
1937
1938 cur_ticks = uacpi_kernel_get_ticks();
1939
1940 if (uacpi_unlikely(cur_ticks > block->expiration_point)) {
1941 uacpi_error("loop time out after running for %u seconds\n",
1942 g_uacpi_rt_ctx.loop_timeout_seconds);
1943 code_block_array_pop(arr: &cur_frame->code_blocks);
1944 return UACPI_STATUS_AML_LOOP_TIMEOUT;
1945 }
1946
1947 block->expiration_point = cur_frame->prev_while_expiration;
1948 } else {
1949 /*
1950 * Calculate the expiration point for this loop.
1951 * If a loop is executed past this point, it will get aborted.
1952 */
1953 block->expiration_point = uacpi_kernel_get_ticks();
1954 block->expiration_point +=
1955 g_uacpi_rt_ctx.loop_timeout_seconds * TICKS_PER_SECOND;
1956 }
1957 break;
1958 case UACPI_AML_OP_ScopeOp:
1959 case UACPI_AML_OP_DeviceOp:
1960 case UACPI_AML_OP_ProcessorOp:
1961 case UACPI_AML_OP_PowerResOp:
1962 case UACPI_AML_OP_ThermalZoneOp:
1963 // Disarm the tracked package so that we don't skip the Scope
1964 op_ctx->tracked_pkg_idx = 0;
1965
1966 block->type = CODE_BLOCK_SCOPE;
1967 block->node = item_array_at(arr: &op_ctx->items, idx: 1)->node;
1968 break;
1969 default:
1970 code_block_array_pop(arr: &cur_frame->code_blocks);
1971 return UACPI_STATUS_INVALID_ARGUMENT;
1972 }
1973
1974 // -1 because we want to re-evaluate at the start of the op next time
1975 block->begin = pkg->begin - 1;
1976 block->end = pkg->end;
1977 ctx->cur_block = block;
1978
1979 cur_frame->last_while = find_last_block(blocks: &cur_frame->code_blocks,
1980 type: CODE_BLOCK_WHILE);
1981 update_scope(frame: cur_frame);
1982 return UACPI_STATUS_OK;
1983}
1984
1985static void frame_reset_post_end_block(struct execution_context *ctx,
1986 enum code_block_type type)
1987{
1988 struct call_frame *frame = ctx->cur_frame;
1989
1990 if (type == CODE_BLOCK_WHILE) {
1991 struct code_block *block = ctx->cur_block;
1992
1993 // + 1 here to skip the WhileOp and get to the PkgLength
1994 frame->prev_while_code_offset = block->begin + 1;
1995 frame->prev_while_expiration = block->expiration_point;
1996 }
1997
1998 code_block_array_pop(arr: &frame->code_blocks);
1999 ctx->cur_block = code_block_array_last(arr: &frame->code_blocks);
2000
2001 if (type == CODE_BLOCK_WHILE) {
2002 frame->last_while = find_last_block(blocks: &frame->code_blocks, type);
2003 } else if (type == CODE_BLOCK_SCOPE) {
2004 update_scope(frame);
2005 }
2006}
2007
/*
 * Log a single object written to the Debug object, prefixed with 'prefix',
 * without descending into composite children (the caller expands packages
 * one level). All output goes through uacpi_trace.
 */
static void debug_store_no_recurse(const uacpi_char *prefix, uacpi_object *src)
{
    switch (src->type) {
    case UACPI_OBJECT_UNINITIALIZED:
        uacpi_trace("%s Uninitialized\n", prefix);
        break;
    case UACPI_OBJECT_STRING:
        uacpi_trace("%s String => \"%s\"\n", prefix, src->buffer->text);
        break;
    case UACPI_OBJECT_INTEGER:
        // Integers are 32-bit wide on revision 1 tables, print accordingly
        if (g_uacpi_rt_ctx.is_rev1) {
            uacpi_trace(
                "%s Integer => 0x%08X\n", prefix, (uacpi_u32)src->integer
            );
        } else {
            uacpi_trace(
                "%s Integer => 0x%016"UACPI_PRIX64"\n", prefix,
                UACPI_FMT64(src->integer)
            );
        }
        break;
    case UACPI_OBJECT_REFERENCE:
        uacpi_trace("%s Reference @%p => %p\n", prefix, src, src->inner_object);
        break;
    case UACPI_OBJECT_PACKAGE:
        uacpi_trace(
            "%s Package @%p (%p) (%zu elements)\n",
            prefix, src, src->package, src->package->count
        );
        break;
    case UACPI_OBJECT_BUFFER:
        uacpi_trace(
            "%s Buffer @%p (%p) (%zu bytes)\n",
            prefix, src, src->buffer, src->buffer->size
        );
        break;
    case UACPI_OBJECT_OPERATION_REGION:
        uacpi_trace(
            "%s OperationRegion (ASID %d) 0x%016"UACPI_PRIX64
            " -> 0x%016"UACPI_PRIX64"\n", prefix,
            src->op_region->space, UACPI_FMT64(src->op_region->offset),
            UACPI_FMT64(src->op_region->offset + src->op_region->length)
        );
        break;
    case UACPI_OBJECT_POWER_RESOURCE:
        uacpi_trace(
            "%s Power Resource %d %d\n",
            prefix, src->power_resource.system_level,
            src->power_resource.resource_order
        );
        break;
    case UACPI_OBJECT_PROCESSOR:
        uacpi_trace(
            "%s Processor[%d] 0x%08X (%d)\n",
            prefix, src->processor->id, src->processor->block_address,
            src->processor->block_length
        );
        break;
    case UACPI_OBJECT_BUFFER_INDEX:
        uacpi_trace(
            "%s Buffer Index %p[%zu] => 0x%02X\n",
            prefix, src->buffer_index.buffer->data, src->buffer_index.idx,
            *buffer_index_cursor(&src->buffer_index)
        );
        break;
    case UACPI_OBJECT_MUTEX:
        uacpi_trace(
            "%s Mutex @%p (%p => %p) sync level %d\n",
            prefix, src, src->mutex, src->mutex->handle,
            src->mutex->sync_level
        );
        break;
    case UACPI_OBJECT_METHOD:
        uacpi_trace("%s Method @%p (%p)\n", prefix, src, src->method);
        break;
    default:
        // Remaining types: just name the type and the object address
        uacpi_trace(
            "%s %s @%p\n",
            prefix, uacpi_object_type_to_string(src->type), src
        );
    }
}
2090
2091static uacpi_status debug_store(uacpi_object *src)
2092{
2093 /*
2094 * Don't bother running the body if current log level is not set to trace.
2095 * All DebugOp logging is done as TRACE exclusively.
2096 */
2097 if (!uacpi_should_log(lvl: UACPI_LOG_TRACE))
2098 return UACPI_STATUS_OK;
2099
2100 src = uacpi_unwrap_internal_reference(object: src);
2101
2102 debug_store_no_recurse(prefix: "[AML DEBUG]", src);
2103
2104 if (src->type == UACPI_OBJECT_PACKAGE) {
2105 uacpi_package *pkg = src->package;
2106 uacpi_size i;
2107
2108 for (i = 0; i < pkg->count; ++i) {
2109 uacpi_object *obj = pkg->objects[i];
2110 if (obj->type == UACPI_OBJECT_REFERENCE &&
2111 obj->flags == UACPI_REFERENCE_KIND_PKG_INDEX)
2112 obj = obj->inner_object;
2113
2114 debug_store_no_recurse(prefix: "Element:", src: obj);
2115 }
2116 }
2117
2118 return UACPI_STATUS_OK;
2119}
2120
2121/*
2122 * NOTE: this function returns the parent object
2123 */
2124uacpi_object *reference_unwind(uacpi_object *obj)
2125{
2126 uacpi_object *parent = obj;
2127
2128 while (obj) {
2129 if (obj->type != UACPI_OBJECT_REFERENCE)
2130 return parent;
2131
2132 parent = obj;
2133 obj = parent->inner_object;
2134 }
2135
2136 // This should be unreachable
2137 return UACPI_NULL;
2138}
2139
2140static void object_replace_child(uacpi_object *parent, uacpi_object *new_child)
2141{
2142 uacpi_object_detach_child(parent);
2143 uacpi_object_attach_child(parent, child: new_child);
2144}
2145
2146/*
2147 * Breakdown of what happens here:
2148 *
2149 * CopyObject(..., Obj) where Obj is:
2150 * 1. LocalX -> Overwrite LocalX.
2151 * 2. NAME -> Overwrite NAME.
2152 * 3. ArgX -> Overwrite ArgX unless ArgX is a reference, in that case
2153 * overwrite the referenced object.
2154 * 4. RefOf -> Not allowed here.
2155 * 5. Index -> Overwrite Object stored at the index.
2156 */
2157 static uacpi_status copy_object_to_reference(uacpi_object *dst,
2158 uacpi_object *src)
2159{
2160 uacpi_status ret;
2161 uacpi_object *src_obj, *new_obj;
2162
2163 switch (dst->flags) {
2164 case UACPI_REFERENCE_KIND_ARG: {
2165 uacpi_object *referenced_obj;
2166
2167 referenced_obj = uacpi_unwrap_internal_reference(object: dst);
2168 if (referenced_obj->type == UACPI_OBJECT_REFERENCE) {
2169 dst = reference_unwind(obj: referenced_obj);
2170 break;
2171 }
2172
2173 UACPI_FALLTHROUGH;
2174 }
2175 case UACPI_REFERENCE_KIND_LOCAL:
2176 case UACPI_REFERENCE_KIND_PKG_INDEX:
2177 case UACPI_REFERENCE_KIND_NAMED:
2178 break;
2179 default:
2180 return UACPI_STATUS_INVALID_ARGUMENT;
2181 }
2182
2183 src_obj = uacpi_unwrap_internal_reference(object: src);
2184
2185 new_obj = uacpi_create_object(type: UACPI_OBJECT_UNINITIALIZED);
2186 if (uacpi_unlikely(new_obj == UACPI_NULL))
2187 return UACPI_STATUS_OUT_OF_MEMORY;
2188
2189 ret = uacpi_object_assign(dst: new_obj, src: src_obj,
2190 UACPI_ASSIGN_BEHAVIOR_DEEP_COPY);
2191 if (uacpi_unlikely_error(ret))
2192 return ret;
2193
2194 object_replace_child(parent: dst, new_child: new_obj);
2195 uacpi_object_unref(obj: new_obj);
2196
2197 return UACPI_STATUS_OK;
2198}
2199
2200/*
2201 * if Store(..., Obj) where Obj is:
2202 * 1. LocalX/Index -> OVERWRITE unless the object is a reference, in that
2203 * case store to the referenced object _with_ implicit
2204 * cast.
2205 * 2. ArgX -> OVERWRITE unless the object is a reference, in that
2206 * case OVERWRITE the referenced object.
2207 * 3. NAME -> Store with implicit cast.
2208 * 4. RefOf -> Not allowed here.
2209 */
2210static uacpi_status store_to_reference(uacpi_object *dst,
2211 uacpi_object *src)
2212{
2213 uacpi_object *src_obj;
2214 uacpi_bool overwrite = UACPI_FALSE;
2215
2216 switch (dst->flags) {
2217 case UACPI_REFERENCE_KIND_LOCAL:
2218 case UACPI_REFERENCE_KIND_ARG:
2219 case UACPI_REFERENCE_KIND_PKG_INDEX: {
2220 uacpi_object *referenced_obj;
2221
2222 if (dst->flags == UACPI_REFERENCE_KIND_PKG_INDEX)
2223 referenced_obj = dst->inner_object;
2224 else
2225 referenced_obj = uacpi_unwrap_internal_reference(object: dst);
2226
2227 if (referenced_obj->type == UACPI_OBJECT_REFERENCE) {
2228 overwrite = dst->flags == UACPI_REFERENCE_KIND_ARG;
2229 dst = reference_unwind(obj: referenced_obj);
2230 break;
2231 }
2232
2233 overwrite = UACPI_TRUE;
2234 break;
2235 }
2236 case UACPI_REFERENCE_KIND_NAMED:
2237 dst = reference_unwind(obj: dst);
2238 break;
2239 default:
2240 return UACPI_STATUS_INVALID_ARGUMENT;
2241 }
2242
2243 src_obj = uacpi_unwrap_internal_reference(object: src);
2244 overwrite |= dst->inner_object->type == UACPI_OBJECT_UNINITIALIZED;
2245
2246 if (overwrite) {
2247 uacpi_status ret;
2248 uacpi_object *new_obj;
2249
2250 new_obj = uacpi_create_object(type: UACPI_OBJECT_UNINITIALIZED);
2251 if (uacpi_unlikely(new_obj == UACPI_NULL))
2252 return UACPI_STATUS_OUT_OF_MEMORY;
2253
2254 ret = uacpi_object_assign(dst: new_obj, src: src_obj,
2255 UACPI_ASSIGN_BEHAVIOR_DEEP_COPY);
2256 if (uacpi_unlikely_error(ret)) {
2257 uacpi_object_unref(obj: new_obj);
2258 return ret;
2259 }
2260
2261 object_replace_child(parent: dst, new_child: new_obj);
2262 uacpi_object_unref(obj: new_obj);
2263 return UACPI_STATUS_OK;
2264 }
2265
2266 return object_assign_with_implicit_cast(dst: dst->inner_object, src: src_obj);
2267}
2268
/*
 * Implements RefOf, CondRefOf and DerefOf.
 *
 * For RefOf/CondRefOf, the destination object becomes a reference to the
 * source. For DerefOf, the bottom-most referenced object is shallow-copied
 * into the destination (see the NT-compat note below).
 */
static uacpi_status handle_ref_or_deref_of(struct execution_context *ctx)
{
    struct op_context *op_ctx = ctx->cur_op_ctx;
    uacpi_object *dst, *src;

    src = item_array_at(arr: &op_ctx->items, idx: 0)->obj;

    // CondRefOf has an extra target operand, so its result lives at index 2
    if (op_ctx->op->code == UACPI_AML_OP_CondRefOfOp)
        dst = item_array_at(arr: &op_ctx->items, idx: 2)->obj;
    else
        dst = item_array_at(arr: &op_ctx->items, idx: 1)->obj;

    if (op_ctx->op->code == UACPI_AML_OP_DerefOfOp) {
        uacpi_bool was_a_reference = UACPI_FALSE;

        if (src->type == UACPI_OBJECT_REFERENCE) {
            was_a_reference = UACPI_TRUE;

            /*
             * Explicit dereferencing [DerefOf] behavior:
             * Simply grabs the bottom-most object that is not a reference.
             * This mimics the behavior of NT Acpi.sys: any DerfOf fetches
             * the bottom-most reference. Note that this is different from
             * ACPICA where DerefOf dereferences one level.
             */
            src = reference_unwind(obj: src)->inner_object;
        }

        if (src->type == UACPI_OBJECT_BUFFER_INDEX) {
            uacpi_buffer_index *buf_idx = &src->buffer_index;

            // DerefOf on a buffer index yields the single byte it points at
            dst->type = UACPI_OBJECT_INTEGER;
            uacpi_memcpy_zerout(
                dst: &dst->integer, src: buffer_index_cursor(buf_idx),
                dst_size: sizeof(dst->integer), src_size: 1
            );
            return UACPI_STATUS_OK;
        }

        if (!was_a_reference) {
            uacpi_error(
                "invalid DerefOf argument: %s, expected a reference\n",
                uacpi_object_type_to_string(src->type)
            );
            return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
        }

        return uacpi_object_assign(dst, src,
                                   UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY);
    }

    // RefOf/CondRefOf: turn dst into a reference holding src
    dst->type = UACPI_OBJECT_REFERENCE;
    dst->inner_object = src;
    uacpi_object_ref(obj: src);
    return UACPI_STATUS_OK;
}
2325
2326static uacpi_status do_binary_math(
2327 uacpi_object *arg0, uacpi_object *arg1,
2328 uacpi_object *tgt0, uacpi_object *tgt1,
2329 uacpi_aml_op op
2330)
2331{
2332 uacpi_u64 lhs, rhs, res;
2333 uacpi_bool should_negate = UACPI_FALSE;
2334
2335 lhs = arg0->integer;
2336 rhs = arg1->integer;
2337
2338 switch (op)
2339 {
2340 case UACPI_AML_OP_AddOp:
2341 res = lhs + rhs;
2342 break;
2343 case UACPI_AML_OP_SubtractOp:
2344 res = lhs - rhs;
2345 break;
2346 case UACPI_AML_OP_MultiplyOp:
2347 res = lhs * rhs;
2348 break;
2349 case UACPI_AML_OP_ShiftLeftOp:
2350 case UACPI_AML_OP_ShiftRightOp:
2351 if (rhs <= (g_uacpi_rt_ctx.is_rev1 ? 31 : 63)) {
2352 if (op == UACPI_AML_OP_ShiftLeftOp)
2353 res = lhs << rhs;
2354 else
2355 res = lhs >> rhs;
2356 } else {
2357 res = 0;
2358 }
2359 break;
2360 case UACPI_AML_OP_NandOp:
2361 should_negate = UACPI_TRUE;
2362 UACPI_FALLTHROUGH;
2363 case UACPI_AML_OP_AndOp:
2364 res = rhs & lhs;
2365 break;
2366 case UACPI_AML_OP_NorOp:
2367 should_negate = UACPI_TRUE;
2368 UACPI_FALLTHROUGH;
2369 case UACPI_AML_OP_OrOp:
2370 res = rhs | lhs;
2371 break;
2372 case UACPI_AML_OP_XorOp:
2373 res = rhs ^ lhs;
2374 break;
2375 case UACPI_AML_OP_DivideOp:
2376 if (uacpi_unlikely(rhs == 0)) {
2377 uacpi_error("attempted to divide by zero\n");
2378 return UACPI_STATUS_AML_BAD_ENCODING;
2379 }
2380 tgt1->integer = lhs / rhs;
2381 res = lhs % rhs;
2382 break;
2383 case UACPI_AML_OP_ModOp:
2384 if (uacpi_unlikely(rhs == 0)) {
2385 uacpi_error("attempted to calculate modulo of zero\n");
2386 return UACPI_STATUS_AML_BAD_ENCODING;
2387 }
2388 res = lhs % rhs;
2389 break;
2390 default:
2391 return UACPI_STATUS_INVALID_ARGUMENT;
2392 }
2393
2394 if (should_negate)
2395 res = ~res;
2396
2397 tgt0->integer = res;
2398 return UACPI_STATUS_OK;
2399}
2400
2401static uacpi_status handle_binary_math(struct execution_context *ctx)
2402{
2403 uacpi_object *arg0, *arg1, *tgt0, *tgt1;
2404 struct item_array *items = &ctx->cur_op_ctx->items;
2405 uacpi_aml_op op = ctx->cur_op_ctx->op->code;
2406
2407 arg0 = item_array_at(arr: items, idx: 0)->obj;
2408 arg1 = item_array_at(arr: items, idx: 1)->obj;
2409
2410 if (op == UACPI_AML_OP_DivideOp) {
2411 tgt0 = item_array_at(arr: items, idx: 4)->obj;
2412 tgt1 = item_array_at(arr: items, idx: 5)->obj;
2413 } else {
2414 tgt0 = item_array_at(arr: items, idx: 3)->obj;
2415 tgt1 = UACPI_NULL;
2416 }
2417
2418 return do_binary_math(arg0, arg1, tgt0, tgt1, op);
2419}
2420
2421static uacpi_status handle_unary_math(struct execution_context *ctx)
2422{
2423 uacpi_object *arg, *tgt;
2424 struct item_array *items = &ctx->cur_op_ctx->items;
2425 uacpi_aml_op op = ctx->cur_op_ctx->op->code;
2426
2427 arg = item_array_at(arr: items, idx: 0)->obj;
2428 tgt = item_array_at(arr: items, idx: 2)->obj;
2429
2430 switch (op) {
2431 case UACPI_AML_OP_NotOp:
2432 tgt->integer = ~arg->integer;
2433 truncate_number_if_needed(obj: tgt);
2434 break;
2435 case UACPI_AML_OP_FindSetRightBitOp:
2436 tgt->integer = uacpi_bit_scan_forward(arg->integer);
2437 break;
2438 case UACPI_AML_OP_FindSetLeftBitOp:
2439 tgt->integer = uacpi_bit_scan_backward(arg->integer);
2440 break;
2441 default:
2442 return UACPI_STATUS_INVALID_ARGUMENT;
2443 }
2444
2445 return UACPI_STATUS_OK;
2446}
2447
2448static uacpi_status ensure_valid_idx(uacpi_object *obj, uacpi_size idx,
2449 uacpi_size src_size)
2450{
2451 if (uacpi_likely(idx < src_size))
2452 return UACPI_STATUS_OK;
2453
2454 uacpi_error(
2455 "Invalid index %zu, %s@%p has %zu elements\n",
2456 idx, uacpi_object_type_to_string(obj->type), obj, src_size
2457 );
2458 return UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX;
2459}
2460
/*
 * Implements the Index operator for Buffer/String/Package sources.
 *
 * Buffer/String produce a BUFFER_INDEX object pointing at the byte;
 * Package produces an internal PKG_INDEX reference (see comment below).
 */
static uacpi_status handle_index(struct execution_context *ctx)
{
    uacpi_status ret;
    struct op_context *op_ctx = ctx->cur_op_ctx;
    uacpi_object *src;
    struct item *dst;
    uacpi_size idx;

    src = item_array_at(arr: &op_ctx->items, idx: 0)->obj;
    idx = item_array_at(arr: &op_ctx->items, idx: 1)->obj->integer;
    dst = item_array_at(arr: &op_ctx->items, idx: 3);

    switch (src->type) {
    case UACPI_OBJECT_BUFFER:
    case UACPI_OBJECT_STRING: {
        uacpi_buffer_index *buf_idx;
        struct object_storage_as_buffer buf;
        get_object_storage(obj: src, out_buf: &buf, UACPI_FALSE);

        ret = ensure_valid_idx(obj: src, idx, src_size: buf.len);
        if (uacpi_unlikely_error(ret))
            return ret;

        dst->type = ITEM_OBJECT;
        dst->obj = uacpi_create_object(type: UACPI_OBJECT_BUFFER_INDEX);
        if (uacpi_unlikely(dst->obj == UACPI_NULL))
            return UACPI_STATUS_OUT_OF_MEMORY;

        // The index object keeps the underlying buffer alive via refcount
        buf_idx = &dst->obj->buffer_index;
        buf_idx->idx = idx;
        buf_idx->buffer = src->buffer;
        uacpi_shareable_ref(buf_idx->buffer);

        break;
    }
    case UACPI_OBJECT_PACKAGE: {
        uacpi_package *pkg = src->package;
        uacpi_object *obj;

        ret = ensure_valid_idx(obj: src, idx, src_size: pkg->count);
        if (uacpi_unlikely_error(ret))
            return ret;

        /*
         * Lazily transform the package element into an internal reference
         * to itself of type PKG_INDEX. This is needed to support stuff like
         * CopyObject(..., Index(pkg, X)) where the new object must be
         * propagated to anyone else with a currently alive index object.
         *
         * Sidenote: Yes, IndexOp is not a SimpleName, so technically it is
         *           illegal to CopyObject to it. However, yet again we fall
         *           victim to the NT ACPI driver implementation, which allows
         *           it just fine.
         */
        obj = pkg->objects[idx];
        if (obj->type != UACPI_OBJECT_REFERENCE ||
            obj->flags != UACPI_REFERENCE_KIND_PKG_INDEX) {

            obj = uacpi_create_internal_reference(
                kind: UACPI_REFERENCE_KIND_PKG_INDEX, child: obj
            );
            if (uacpi_unlikely(obj == UACPI_NULL))
                return UACPI_STATUS_OUT_OF_MEMORY;

            // The reference now owns the element; drop the extra count
            pkg->objects[idx] = obj;
            uacpi_object_unref(obj: obj->inner_object);
        }

        dst->obj = obj;
        dst->type = ITEM_OBJECT;
        uacpi_object_ref(obj: dst->obj);
        break;
    }
    default:
        uacpi_error(
            "Invalid argument for Index: %s, "
            "expected String/Buffer/Package\n",
            uacpi_object_type_to_string(src->type)
        );
        return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
    }

    return UACPI_STATUS_OK;
}
2545
2546static uacpi_u64 object_to_integer(const uacpi_object *obj,
2547 uacpi_size max_buffer_bytes)
2548{
2549 uacpi_u64 dst;
2550
2551 switch (obj->type) {
2552 case UACPI_OBJECT_INTEGER:
2553 dst = obj->integer;
2554 break;
2555 case UACPI_OBJECT_BUFFER: {
2556 uacpi_size bytes;
2557 bytes = UACPI_MIN(max_buffer_bytes, obj->buffer->size);
2558 uacpi_memcpy_zerout(dst: &dst, src: obj->buffer->data, dst_size: sizeof(dst), src_size: bytes);
2559 break;
2560 }
2561 case UACPI_OBJECT_STRING:
2562 uacpi_string_to_integer(
2563 str: obj->buffer->text, max_chars: obj->buffer->size, base: UACPI_BASE_AUTO, out_value: &dst
2564 );
2565 break;
2566 default:
2567 dst = 0;
2568 break;
2569 }
2570
2571 return dst;
2572}
2573
2574static uacpi_status integer_to_string(
2575 uacpi_u64 integer, uacpi_buffer *str, uacpi_bool is_hex
2576)
2577{
2578 int repr_len;
2579 uacpi_char int_buf[21];
2580 uacpi_size final_size;
2581
2582 repr_len = uacpi_snprintf(
2583 buffer: int_buf, capacity: sizeof(int_buf),
2584 fmt: is_hex ? "%"UACPI_PRIX64 : "%"UACPI_PRIu64,
2585 UACPI_FMT64(integer)
2586 );
2587 if (uacpi_unlikely(repr_len < 0))
2588 return UACPI_STATUS_INVALID_ARGUMENT;
2589
2590 // 0x prefix + repr + \0
2591 final_size = (is_hex ? 2 : 0) + repr_len + 1;
2592
2593 str->data = uacpi_kernel_alloc(size: final_size);
2594 if (uacpi_unlikely(str->data == UACPI_NULL))
2595 return UACPI_STATUS_OUT_OF_MEMORY;
2596
2597 if (is_hex) {
2598 str->text[0] = '0';
2599 str->text[1] = 'x';
2600 }
2601 uacpi_memcpy(dest: str->text + (is_hex ? 2 : 0), src: int_buf, count: repr_len + 1);
2602 str->size = final_size;
2603
2604 return UACPI_STATUS_OK;
2605}
2606
2607static uacpi_status buffer_to_string(
2608 uacpi_buffer *buf, uacpi_buffer *str, uacpi_bool is_hex
2609)
2610{
2611 int repr_len;
2612 uacpi_char int_buf[5];
2613 uacpi_size i, final_size;
2614 uacpi_char *cursor;
2615
2616 if (is_hex) {
2617 final_size = 4 * buf->size;
2618 } else {
2619 final_size = 0;
2620
2621 for (i = 0; i < buf->size; ++i) {
2622 uacpi_u8 value = ((uacpi_u8*)buf->data)[i];
2623
2624 if (value < 10)
2625 final_size += 1;
2626 else if (value < 100)
2627 final_size += 2;
2628 else
2629 final_size += 3;
2630 }
2631 }
2632
2633 // Comma for every value but one
2634 final_size += buf->size - 1;
2635
2636 // Null terminator
2637 final_size += 1;
2638
2639 str->data = uacpi_kernel_alloc(size: final_size);
2640 if (uacpi_unlikely(str->data == UACPI_NULL))
2641 return UACPI_STATUS_OUT_OF_MEMORY;
2642
2643 cursor = str->data;
2644
2645 for (i = 0; i < buf->size; ++i) {
2646 repr_len = uacpi_snprintf(
2647 buffer: int_buf, capacity: sizeof(int_buf),
2648 fmt: is_hex ? "0x%02X" : "%d",
2649 ((uacpi_u8*)buf->data)[i]
2650 );
2651 if (uacpi_unlikely(repr_len < 0)) {
2652 uacpi_free(str->data, final_size);
2653 str->data = UACPI_NULL;
2654 return UACPI_STATUS_INVALID_ARGUMENT;
2655 }
2656
2657 uacpi_memcpy(dest: cursor, src: int_buf, count: repr_len + 1);
2658 cursor += repr_len;
2659
2660 if (i != buf->size - 1)
2661 *cursor++ = ',';
2662 }
2663
2664 str->size = final_size;
2665 return UACPI_STATUS_OK;
2666}
2667
2668static uacpi_status do_make_empty_object(uacpi_buffer *buf,
2669 uacpi_bool is_string)
2670{
2671 buf->text = uacpi_kernel_calloc(count: 1, size: sizeof(uacpi_char));
2672 if (uacpi_unlikely(buf->text == UACPI_NULL))
2673 return UACPI_STATUS_OUT_OF_MEMORY;
2674
2675 if (is_string)
2676 buf->size = sizeof(uacpi_char);
2677
2678 return UACPI_STATUS_OK;
2679}
2680
// Initialize 'buf' as an empty string ("" with size 1 for the terminator).
static uacpi_status make_null_string(uacpi_buffer *buf)
{
    return do_make_empty_object(buf, UACPI_TRUE);
}
2685
// Initialize 'buf' as an empty (zero-sized) buffer.
static uacpi_status make_null_buffer(uacpi_buffer *buf)
{
    /*
     * Allocate at least 1 byte just to be safe,
     * even for empty buffers. We still set the
     * size to 0 though.
     */
    return do_make_empty_object(buf, UACPI_FALSE);
}
2695
/*
 * Implements ToInteger, ToHexString, ToDecimalString and ToBuffer.
 *
 * An integer/buffer source for To{Hex,Decimal}String is rendered as text;
 * any other source type falls through into the ToBuffer conversion path.
 */
static uacpi_status handle_to(struct execution_context *ctx)
{
    uacpi_status ret = UACPI_STATUS_OK;
    struct op_context *op_ctx = ctx->cur_op_ctx;
    uacpi_object *src, *dst;

    src = item_array_at(arr: &op_ctx->items, idx: 0)->obj;
    dst = item_array_at(arr: &op_ctx->items, idx: 2)->obj;

    switch (op_ctx->op->code) {
    case UACPI_AML_OP_ToIntegerOp:
        // NT always takes the first 8 bytes, even for revision 1
        dst->integer = object_to_integer(obj: src, max_buffer_bytes: 8);
        break;

    case UACPI_AML_OP_ToHexStringOp:
    case UACPI_AML_OP_ToDecimalStringOp: {
        uacpi_bool is_hex = op_ctx->op->code == UACPI_AML_OP_ToHexStringOp;

        if (src->type == UACPI_OBJECT_INTEGER) {
            ret = integer_to_string(integer: src->integer, str: dst->buffer, is_hex);
            break;
        } else if (src->type == UACPI_OBJECT_BUFFER) {
            // An empty buffer converts to an empty string
            if (uacpi_unlikely(src->buffer->size == 0))
                return make_null_string(buf: dst->buffer);

            ret = buffer_to_string(buf: src->buffer, str: dst->buffer, is_hex);
            break;
        }
        // Any other source type behaves like ToBuffer
        UACPI_FALLTHROUGH;
    }
    case UACPI_AML_OP_ToBufferOp: {
        struct object_storage_as_buffer buf;
        uacpi_u8 *dst_buf;

        ret = get_object_storage(obj: src, out_buf: &buf, UACPI_TRUE);
        if (uacpi_unlikely_error(ret))
            return ret;

        if (uacpi_unlikely(buf.len == 0))
            return make_null_buffer(buf: dst->buffer);

        // Deep-copy the source bytes so dst owns its own storage
        dst_buf = uacpi_kernel_alloc(size: buf.len);
        if (uacpi_unlikely(dst_buf == UACPI_NULL))
            return UACPI_STATUS_OUT_OF_MEMORY;

        uacpi_memcpy(dest: dst_buf, src: buf.ptr, count: buf.len);
        dst->buffer->data = dst_buf;
        dst->buffer->size = buf.len;
        break;
    }

    default:
        return UACPI_STATUS_INVALID_ARGUMENT;
    }

    return ret;
}
2754
2755static uacpi_status handle_to_string(struct execution_context *ctx)
2756{
2757 struct op_context *op_ctx = ctx->cur_op_ctx;
2758 uacpi_buffer *src_buf, *dst_buf;
2759 uacpi_size req_len, len;
2760
2761 src_buf = item_array_at(arr: &op_ctx->items, idx: 0)->obj->buffer;
2762 req_len = item_array_at(arr: &op_ctx->items, idx: 1)->obj->integer;
2763 dst_buf = item_array_at(arr: &op_ctx->items, idx: 3)->obj->buffer;
2764
2765 len = UACPI_MIN(req_len, src_buf->size);
2766 if (uacpi_unlikely(len == 0))
2767 return make_null_string(buf: dst_buf);
2768
2769 len = uacpi_strnlen(str: src_buf->text, max: len);
2770
2771 dst_buf->text = uacpi_kernel_alloc(size: len + 1);
2772 if (uacpi_unlikely(dst_buf->text == UACPI_NULL))
2773 return UACPI_STATUS_OUT_OF_MEMORY;
2774
2775 uacpi_memcpy(dest: dst_buf->text, src: src_buf->data, count: len);
2776 dst_buf->text[len] = '\0';
2777 dst_buf->size = len + 1;
2778
2779 return UACPI_STATUS_OK;
2780}
2781
/*
 * Implements Mid(Source, Index, Length): extracts a substring/sub-buffer.
 * The result type (string vs buffer) follows the source type; out-of-range
 * or zero-length requests yield an empty string/buffer.
 */
static uacpi_status handle_mid(struct execution_context *ctx)
{
    struct op_context *op_ctx = ctx->cur_op_ctx;
    uacpi_object *src, *dst;
    struct object_storage_as_buffer src_buf;
    uacpi_buffer *dst_buf;
    uacpi_size idx, len;
    uacpi_bool is_string;

    src = item_array_at(arr: &op_ctx->items, idx: 0)->obj;
    if (uacpi_unlikely(src->type != UACPI_OBJECT_STRING &&
                       src->type != UACPI_OBJECT_BUFFER)) {
        uacpi_error(
            "Invalid argument for Mid: %s, expected String/Buffer\n",
            uacpi_object_type_to_string(src->type)
        );
        return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
    }

    idx = item_array_at(arr: &op_ctx->items, idx: 1)->obj->integer;
    len = item_array_at(arr: &op_ctx->items, idx: 2)->obj->integer;
    dst = item_array_at(arr: &op_ctx->items, idx: 4)->obj;
    dst_buf = dst->buffer;

    is_string = src->type == UACPI_OBJECT_STRING;
    get_object_storage(obj: src, out_buf: &src_buf, UACPI_FALSE);

    // Degenerate cases produce an empty result of the source's type
    if (uacpi_unlikely(src_buf.len == 0 || idx >= src_buf.len || len == 0)) {
        if (src->type == UACPI_OBJECT_STRING) {
            dst->type = UACPI_OBJECT_STRING;
            return make_null_string(buf: dst_buf);
        }

        return make_null_buffer(buf: dst_buf);
    }

    // Guaranteed to be at least 1 here
    len = UACPI_MIN(len, src_buf.len - idx);

    // Extra byte for the null terminator when the result is a string
    dst_buf->data = uacpi_kernel_alloc(size: len + is_string);
    if (uacpi_unlikely(dst_buf->data == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    uacpi_memcpy(dest: dst_buf->data, src: (uacpi_u8*)src_buf.ptr + idx, count: len);
    dst_buf->size = len;

    if (is_string) {
        dst_buf->text[dst_buf->size++] = '\0';
        dst->type = UACPI_OBJECT_STRING;
    }

    return UACPI_STATUS_OK;
}
2835
/*
 * Implements Concatenate. The result type follows the first operand:
 * Integer -> buffer of both integers' raw bytes, Buffer -> concatenated
 * buffer, String -> concatenated string (the second operand is rendered
 * to text first if needed).
 */
static uacpi_status handle_concatenate(struct execution_context *ctx)
{
    uacpi_status ret = UACPI_STATUS_OK;
    struct op_context *op_ctx = ctx->cur_op_ctx;
    uacpi_object *arg0, *arg1, *dst;
    uacpi_u8 *dst_buf;
    uacpi_size buf_size = 0;

    arg0 = item_array_at(arr: &op_ctx->items, idx: 0)->obj;
    arg1 = item_array_at(arr: &op_ctx->items, idx: 1)->obj;
    dst = item_array_at(arr: &op_ctx->items, idx: 3)->obj;

    switch (arg0->type) {
    case UACPI_OBJECT_INTEGER: {
        uacpi_u64 arg1_as_int;
        uacpi_size int_size;

        // Result is the raw bytes of both integers back to back
        int_size = sizeof_int();
        buf_size = int_size * 2;

        dst_buf = uacpi_kernel_alloc(size: buf_size);
        if (uacpi_unlikely(dst_buf == UACPI_NULL))
            return UACPI_STATUS_OUT_OF_MEMORY;

        arg1_as_int = object_to_integer(obj: arg1, max_buffer_bytes: 8);

        uacpi_memcpy(dest: dst_buf, src: &arg0->integer, count: int_size);
        uacpi_memcpy(dest: dst_buf+ int_size, src: &arg1_as_int, count: int_size);
        break;
    }
    case UACPI_OBJECT_BUFFER: {
        uacpi_buffer *arg0_buf = arg0->buffer;
        struct object_storage_as_buffer arg1_buf;

        get_object_storage(obj: arg1, out_buf: &arg1_buf, UACPI_TRUE);
        buf_size = arg0_buf->size + arg1_buf.len;

        dst_buf = uacpi_kernel_alloc(size: buf_size);
        if (uacpi_unlikely(dst_buf == UACPI_NULL))
            return UACPI_STATUS_OUT_OF_MEMORY;

        uacpi_memcpy(dest: dst_buf, src: arg0_buf->data, count: arg0_buf->size);
        uacpi_memcpy(dest: dst_buf + arg0_buf->size, src: arg1_buf.ptr, count: arg1_buf.len);
        break;
    }
    case UACPI_OBJECT_STRING: {
        uacpi_char int_buf[17];
        void *arg1_ptr;
        uacpi_size arg0_size, arg1_size;
        uacpi_buffer *arg0_buf = arg0->buffer;

        // Render the second operand as text first
        switch (arg1->type) {
        case UACPI_OBJECT_INTEGER: {
            int size;
            size = uacpi_snprintf(buffer: int_buf, capacity: sizeof(int_buf), fmt: "%"UACPI_PRIx64,
                                  UACPI_FMT64(arg1->integer));
            if (size < 0)
                return UACPI_STATUS_INVALID_ARGUMENT;

            arg1_ptr = int_buf;
            arg1_size = size + 1;
            break;
        }
        case UACPI_OBJECT_STRING:
            arg1_ptr = arg1->buffer->data;
            arg1_size = arg1->buffer->size;
            break;
        case UACPI_OBJECT_BUFFER: {
            // Allocates tmp_buf.data; freed at 'cleanup' below
            uacpi_buffer tmp_buf;

            ret = buffer_to_string(buf: arg1->buffer, str: &tmp_buf, UACPI_TRUE);
            if (uacpi_unlikely_error(ret))
                return ret;

            arg1_ptr = tmp_buf.data;
            arg1_size = tmp_buf.size;
            break;
        }
        default:
            return UACPI_STATUS_INVALID_ARGUMENT;
        }

        // Drop arg0's null terminator; arg1's rendering provides one
        arg0_size = arg0_buf->size ? arg0_buf->size - 1 : arg0_buf->size;
        buf_size = arg0_size + arg1_size;

        dst_buf = uacpi_kernel_alloc(size: buf_size);
        if (uacpi_unlikely(dst_buf == UACPI_NULL)) {
            ret = UACPI_STATUS_OUT_OF_MEMORY;
            goto cleanup;
        }

        uacpi_memcpy(dest: dst_buf, src: arg0_buf->data, count: arg0_size);
        uacpi_memcpy(dest: dst_buf + arg0_size, src: arg1_ptr, count: arg1_size);
        dst->type = UACPI_OBJECT_STRING;

    cleanup:
        // The buffer case above allocated a temporary string; free it
        if (arg1->type == UACPI_OBJECT_BUFFER)
            uacpi_free(arg1_ptr, arg1_size);
        break;
    }
    default:
        return UACPI_STATUS_INVALID_ARGUMENT;
    }

    if (uacpi_likely_success(ret)) {
        dst->buffer->data = dst_buf;
        dst->buffer->size = buf_size;
    }
    return ret;
}
2946
/*
 * Implements ConcatenateResTemplate: joins two resource template buffers
 * by stripping each operand's end tag, concatenating the payloads, and
 * appending a fresh end tag.
 */
static uacpi_status handle_concatenate_res(struct execution_context *ctx)
{
    uacpi_status ret;
    struct op_context *op_ctx = ctx->cur_op_ctx;
    uacpi_object *arg0, *arg1, *dst;
    uacpi_u8 *dst_buf;
    uacpi_size dst_size, arg0_size, arg1_size;

    arg0 = item_array_at(arr: &op_ctx->items, idx: 0)->obj;
    arg1 = item_array_at(arr: &op_ctx->items, idx: 1)->obj;
    dst = item_array_at(arr: &op_ctx->items, idx: 3)->obj;

    // arg{0,1}_size become the payload lengths up to (not including) the
    // end tag; this also validates that both operands are templates
    ret = uacpi_find_aml_resource_end_tag(buffer: arg0->buffer, out_offset: &arg0_size);
    if (uacpi_unlikely_error(ret))
        return ret;

    ret = uacpi_find_aml_resource_end_tag(buffer: arg1->buffer, out_offset: &arg1_size);
    if (uacpi_unlikely_error(ret))
        return ret;

    dst_size = arg0_size + arg1_size + sizeof(struct acpi_resource_end_tag);

    dst_buf = uacpi_kernel_alloc(size: dst_size);
    if (uacpi_unlikely(dst_buf == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    dst->buffer->data = dst_buf;
    dst->buffer->size = dst_size;

    uacpi_memcpy(dest: dst_buf, src: arg0->buffer->data, count: arg0_size);
    uacpi_memcpy(dest: dst_buf + arg0_size, src: arg1->buffer->data, count: arg1_size);

    /*
     * Small item (0), End Tag (0x0F), length 1
     * Leave the checksum as 0
     */
    dst_buf[dst_size - 2] =
        (ACPI_RESOURCE_END_TAG << ACPI_SMALL_ITEM_NAME_IDX) |
        (sizeof(struct acpi_resource_end_tag) - 1);
    dst_buf[dst_size - 1] = 0;

    return UACPI_STATUS_OK;
}
2990
2991static uacpi_status handle_sizeof(struct execution_context *ctx)
2992{
2993 struct op_context *op_ctx = ctx->cur_op_ctx;
2994 uacpi_object *src, *dst;
2995
2996 src = item_array_at(arr: &op_ctx->items, idx: 0)->obj;
2997 dst = item_array_at(arr: &op_ctx->items, idx: 1)->obj;
2998
2999 if (uacpi_likely(src->type == UACPI_OBJECT_REFERENCE))
3000 src = reference_unwind(obj: src)->inner_object;
3001
3002 switch (src->type) {
3003 case UACPI_OBJECT_STRING:
3004 case UACPI_OBJECT_BUFFER: {
3005 struct object_storage_as_buffer buf;
3006 get_object_storage(obj: src, out_buf: &buf, UACPI_FALSE);
3007
3008 dst->integer = buf.len;
3009 break;
3010 }
3011
3012 case UACPI_OBJECT_PACKAGE:
3013 dst->integer = src->package->count;
3014 break;
3015
3016 default:
3017 uacpi_error(
3018 "Invalid argument for Sizeof: %s, "
3019 "expected String/Buffer/Package\n",
3020 uacpi_object_type_to_string(src->type)
3021 );
3022 return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
3023 }
3024
3025 return UACPI_STATUS_OK;
3026}
3027
3028static uacpi_status handle_object_type(struct execution_context *ctx)
3029{
3030 struct op_context *op_ctx = ctx->cur_op_ctx;
3031 uacpi_object *src, *dst;
3032
3033 src = item_array_at(arr: &op_ctx->items, idx: 0)->obj;
3034 dst = item_array_at(arr: &op_ctx->items, idx: 1)->obj;
3035
3036 if (uacpi_likely(src->type == UACPI_OBJECT_REFERENCE))
3037 src = reference_unwind(obj: src)->inner_object;
3038
3039 dst->integer = src->type;
3040 if (dst->integer == UACPI_OBJECT_BUFFER_INDEX)
3041 dst->integer = UACPI_OBJECT_BUFFER_FIELD;
3042
3043 return UACPI_STATUS_OK;
3044}
3045
3046static uacpi_status handle_timer(struct execution_context *ctx)
3047{
3048 struct op_context *op_ctx = ctx->cur_op_ctx;
3049 uacpi_object *dst;
3050
3051 dst = item_array_at(arr: &op_ctx->items, idx: 0)->obj;
3052 dst->integer = uacpi_kernel_get_ticks();
3053
3054 return UACPI_STATUS_OK;
3055}
3056
3057static uacpi_status handle_stall_or_sleep(struct execution_context *ctx)
3058{
3059 struct op_context *op_ctx = ctx->cur_op_ctx;
3060 uacpi_u64 time;
3061
3062 time = item_array_at(arr: &op_ctx->items, idx: 0)->obj->integer;
3063
3064 if (op_ctx->op->code == UACPI_AML_OP_SleepOp) {
3065 /*
3066 * ACPICA doesn't allow sleeps longer than 2 seconds,
3067 * so we shouldn't either.
3068 */
3069 if (time > 2000)
3070 time = 2000;
3071
3072 uacpi_kernel_sleep(msec: time);
3073 } else {
3074 // Spec says this must evaluate to a ByteData
3075 time &= 0xFF;
3076 uacpi_kernel_stall(usec: time);
3077 }
3078
3079 return UACPI_STATUS_OK;
3080}
3081
/*
 * Implements FromBCD and ToBCD: converts between a packed binary-coded
 * decimal integer (one decimal digit per nybble) and a plain integer.
 */
static uacpi_status handle_bcd(struct execution_context *ctx)
{
    struct op_context *op_ctx = ctx->cur_op_ctx;
    uacpi_u64 src, dst = 0;
    uacpi_size i;
    uacpi_object *dst_obj;

    src = item_array_at(arr: &op_ctx->items, idx: 0)->obj->integer;
    dst_obj = item_array_at(arr: &op_ctx->items, idx: 2)->obj;
    i = 64;

    /*
     * NOTE: ACPICA just errors out for invalid BCD, but NT allows it just fine.
     *       FromBCD matches NT behavior 1:1 even for invalid BCD, but ToBCD
     *       produces different results when the input is too large.
     */
    if (op_ctx->op->code == UACPI_AML_OP_FromBCDOp) {
        // Walk all 16 nybbles top-down, accumulating decimal digits
        do {
            i -= 4;
            dst *= 10;
            dst += (src >> i) & 0b1111;
        } while (i);
    } else {
        // Build BCD nybbles from the least significant decimal digit up,
        // inserting at the top and shifting down as digits accumulate
        while (src != 0) {
            dst >>= 4;
            i -= 4;
            dst |= (src % 10) << 60;
            src /= 10;
        }

        // Shift the digits down into the low nybbles
        dst >>= (i % 64);
    }

    dst_obj->integer = dst;
    return UACPI_STATUS_OK;
}
3118
3119static uacpi_status handle_logical_not(struct execution_context *ctx)
3120{
3121 struct op_context *op_ctx = ctx->cur_op_ctx;
3122 uacpi_object *src, *dst;
3123
3124 src = item_array_at(arr: &op_ctx->items, idx: 0)->obj;
3125 dst = item_array_at(arr: &op_ctx->items, idx: 1)->obj;
3126
3127 dst->type = UACPI_OBJECT_INTEGER;
3128 dst->integer = src->integer ? 0 : ones();
3129
3130 return UACPI_STATUS_OK;
3131}
3132
3133static uacpi_bool handle_logical_equality(uacpi_object *lhs, uacpi_object *rhs)
3134{
3135 uacpi_bool res = UACPI_FALSE;
3136
3137 if (lhs->type == UACPI_OBJECT_STRING || lhs->type == UACPI_OBJECT_BUFFER) {
3138 res = lhs->buffer->size == rhs->buffer->size;
3139
3140 if (res && lhs->buffer->size) {
3141 res = uacpi_memcmp(
3142 lhs: lhs->buffer->data,
3143 rhs: rhs->buffer->data,
3144 count: lhs->buffer->size
3145 ) == 0;
3146 }
3147 } else if (lhs->type == UACPI_OBJECT_INTEGER) {
3148 res = lhs->integer == rhs->integer;
3149 }
3150
3151 return res;
3152}
3153
3154static uacpi_bool handle_logical_less_or_greater(
3155 uacpi_aml_op op, uacpi_object *lhs, uacpi_object *rhs
3156)
3157{
3158 if (lhs->type == UACPI_OBJECT_STRING || lhs->type == UACPI_OBJECT_BUFFER) {
3159 int res;
3160 uacpi_buffer *lhs_buf, *rhs_buf;
3161
3162 lhs_buf = lhs->buffer;
3163 rhs_buf = rhs->buffer;
3164
3165 res = uacpi_memcmp(lhs: lhs_buf->data, rhs: rhs_buf->data,
3166 UACPI_MIN(lhs_buf->size, rhs_buf->size));
3167 if (res == 0) {
3168 if (lhs_buf->size < rhs_buf->size)
3169 res = -1;
3170 else if (lhs_buf->size > rhs_buf->size)
3171 res = 1;
3172 }
3173
3174 if (op == UACPI_AML_OP_LLessOp)
3175 return res < 0;
3176
3177 return res > 0;
3178 }
3179
3180 if (op == UACPI_AML_OP_LLessOp)
3181 return lhs->integer < rhs->integer;
3182
3183 return lhs->integer > rhs->integer;
3184}
3185
/*
 * Implements the two-operand logical opcodes (LEqual/LLess/LGreater and
 * LAnd/LOr). The result is an integer: all-ones for true, zero for false.
 */
static uacpi_status handle_binary_logic(struct execution_context *ctx)
{
    struct op_context *op_ctx = ctx->cur_op_ctx;
    uacpi_aml_op op = op_ctx->op->code;
    uacpi_object *lhs, *rhs, *dst;
    uacpi_bool res;

    lhs = item_array_at(arr: &op_ctx->items, idx: 0)->obj;
    rhs = item_array_at(arr: &op_ctx->items, idx: 1)->obj;
    dst = item_array_at(arr: &op_ctx->items, idx: 2)->obj;

    switch (op) {
    case UACPI_AML_OP_LEqualOp:
    case UACPI_AML_OP_LLessOp:
    case UACPI_AML_OP_LGreaterOp:
        // Comparisons require both operands to share a type
        // TODO: typecheck at parse time
        if (lhs->type != rhs->type) {
            uacpi_error(
                "don't know how to do a logical comparison of '%s' and '%s'\n",
                uacpi_object_type_to_string(lhs->type),
                uacpi_object_type_to_string(rhs->type)
            );
            return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
        }

        if (op == UACPI_AML_OP_LEqualOp)
            res = handle_logical_equality(lhs, rhs);
        else
            res = handle_logical_less_or_greater(op, lhs, rhs);
        break;
    default: {
        // LAnd/LOr: coerce both operands to integers first
        uacpi_u64 lhs_int, rhs_int;

        // NT only looks at the first 4 bytes of a buffer
        lhs_int = object_to_integer(obj: lhs, max_buffer_bytes: 4);
        rhs_int = object_to_integer(obj: rhs, max_buffer_bytes: 4);

        if (op == UACPI_AML_OP_LandOp)
            res = lhs_int && rhs_int;
        else
            res = lhs_int || rhs_int;
        break;
    }
    }

    dst->integer = res ? ones() : 0;
    return UACPI_STATUS_OK;
}
3234
/*
 * Match() comparison operators, with the numeric values used by the
 * immediate operands of the AML MatchOp.
 */
enum match_op {
    MTR = 0, // always matches ("true")
    MEQ = 1, // match if equal
    MLE = 2, // match if less than or equal
    MLT = 3, // match if less than
    MGE = 4, // match if greater than or equal
    MGT = 5, // match if greater than
};
3243
3244static uacpi_bool match_one(enum match_op op, uacpi_u64 lhs, uacpi_u64 rhs)
3245{
3246 switch (op) {
3247 case MTR:
3248 return UACPI_TRUE;
3249 case MEQ:
3250 return lhs == rhs;
3251 case MLE:
3252 return lhs <= rhs;
3253 case MLT:
3254 return lhs < rhs;
3255 case MGE:
3256 return lhs >= rhs;
3257 case MGT:
3258 return lhs > rhs;
3259 default:
3260 return UACPI_FALSE;
3261 }
3262}
3263
3264static uacpi_status handle_match(struct execution_context *ctx)
3265{
3266 struct op_context *op_ctx = ctx->cur_op_ctx;
3267 uacpi_package *pkg;
3268 uacpi_u64 operand0, operand1, start_idx, i;
3269 enum match_op mop0, mop1;
3270 uacpi_object *dst;
3271
3272 pkg = item_array_at(arr: &op_ctx->items, idx: 0)->obj->package;
3273 mop0 = item_array_at(arr: &op_ctx->items, idx: 1)->immediate;
3274 operand0 = item_array_at(arr: &op_ctx->items, idx: 2)->obj->integer;
3275 mop1 = item_array_at(arr: &op_ctx->items, idx: 3)->immediate;
3276 operand1 = item_array_at(arr: &op_ctx->items, idx: 4)->obj->integer;
3277 start_idx = item_array_at(arr: &op_ctx->items, idx: 5)->obj->integer;
3278 dst = item_array_at(arr: &op_ctx->items, idx: 6)->obj;
3279
3280 for (i = start_idx; i < pkg->count; ++i) {
3281 uacpi_object *obj = pkg->objects[i];
3282
3283 if (obj->type != UACPI_OBJECT_INTEGER)
3284 continue;
3285
3286 if (match_one(op: mop0, lhs: obj->integer, rhs: operand0) &&
3287 match_one(op: mop1, lhs: obj->integer, rhs: operand1))
3288 break;
3289 }
3290
3291 if (i < pkg->count)
3292 dst->integer = i;
3293 else
3294 dst->integer = ones();
3295
3296 return UACPI_STATUS_OK;
3297}
3298
3299/*
3300 * PkgLength :=
3301 * PkgLeadByte |
3302 * <pkgleadbyte bytedata> |
3303 * <pkgleadbyte bytedata bytedata> | <pkgleadbyte bytedata bytedata bytedata>
3304 * PkgLeadByte :=
3305 * <bit 7-6: bytedata count that follows (0-3)>
3306 * <bit 5-4: only used if pkglength < 63>
3307 * <bit 3-0: least significant package length nybble>
3308 */
3309static uacpi_status parse_package_length(struct call_frame *frame,
3310 struct package_length *out_pkg)
3311{
3312 uacpi_u32 left, size;
3313 uacpi_u8 *data, marker_length;
3314
3315 out_pkg->begin = frame->code_offset;
3316 marker_length = 1;
3317
3318 left = call_frame_code_bytes_left(frame);
3319 if (uacpi_unlikely(left < 1))
3320 return UACPI_STATUS_AML_BAD_ENCODING;
3321
3322 data = call_frame_cursor(frame);
3323 marker_length += *data >> 6;
3324
3325 if (uacpi_unlikely(left < marker_length))
3326 return UACPI_STATUS_AML_BAD_ENCODING;
3327
3328 switch (marker_length) {
3329 case 1:
3330 size = *data & 0b111111;
3331 break;
3332 case 2:
3333 case 3:
3334 case 4: {
3335 uacpi_u32 temp_byte = 0;
3336
3337 size = *data & 0b1111;
3338 uacpi_memcpy(dest: &temp_byte, src: data + 1, count: marker_length - 1);
3339
3340 // marker_length - 1 is at most 3, so this shift is safe
3341 size |= temp_byte << 4;
3342 break;
3343 }
3344 }
3345
3346 frame->code_offset += marker_length;
3347
3348 out_pkg->end = out_pkg->begin + size;
3349 if (uacpi_unlikely(out_pkg->end < out_pkg->begin)) {
3350 uacpi_error(
3351 "PkgLength overflow: start=%u, size=%u\n", out_pkg->begin, size
3352 );
3353 return UACPI_STATUS_AML_BAD_ENCODING;
3354 }
3355
3356 return UACPI_STATUS_OK;
3357}
3358
3359/*
3360 * ByteData
3361 * // bit 0-2: ArgCount (0-7)
3362 * // bit 3: SerializeFlag
3363 * // 0 NotSerialized
3364 * // 1 Serialized
3365 * // bit 4-7: SyncLevel (0x00-0x0f)
3366 */
3367static void init_method_flags(uacpi_control_method *method, uacpi_u8 flags_byte)
3368{
3369 method->args = flags_byte & 0b111;
3370 method->is_serialized = (flags_byte >> 3) & 1;
3371 method->sync_level = flags_byte >> 4;
3372}
3373
/*
 * Method(Name, Flags) { TermList }
 *
 * Item layout:
 *   [0] -> PkgLength spanning the entire method declaration
 *   [1] -> namespace node the method gets bound to
 *   [2] -> MethodFlags immediate
 *   [3] -> AML offset where the method body starts
 *   [4] -> the control method object being created
 */
static uacpi_status handle_create_method(struct execution_context *ctx)
{
    struct op_context *op_ctx = ctx->cur_op_ctx;
    struct uacpi_control_method *this_method, *method;
    struct package_length *pkg;
    struct uacpi_namespace_node *node;
    struct uacpi_object *dst;
    uacpi_u32 method_begin_offset;

    this_method = ctx->cur_frame->method;
    pkg = &item_array_at(arr: &op_ctx->items, idx: 0)->pkg;
    node = item_array_at(arr: &op_ctx->items, idx: 1)->node;
    method_begin_offset = item_array_at(arr: &op_ctx->items, idx: 3)->immediate;

    // Reject declarations whose body doesn't fit inside the enclosing AML
    if (uacpi_unlikely(pkg->end < pkg->begin ||
                       pkg->end < method_begin_offset ||
                       pkg->end > this_method->size)) {
        uacpi_error(
            "invalid method %.4s bounds [%u..%u] (parent size is %u)\n",
            node->name.text, method_begin_offset, pkg->end, this_method->size
        );
        return UACPI_STATUS_AML_BAD_ENCODING;
    }

    dst = item_array_at(arr: &op_ctx->items, idx: 4)->obj;

    method = dst->method;
    init_method_flags(method, flags_byte: item_array_at(arr: &op_ctx->items, idx: 2)->immediate);

    // The method body aliases the parent's AML buffer, no copy is made
    method->code = ctx->cur_frame->method->code;
    method->code += method_begin_offset;
    method->size = pkg->end - method_begin_offset;

    node->object = uacpi_create_internal_reference(kind: UACPI_REFERENCE_KIND_NAMED,
                                                   child: dst);
    if (uacpi_unlikely(node->object == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    return UACPI_STATUS_OK;
}
3414
3415static uacpi_status handle_create_mutex_or_event(struct execution_context *ctx)
3416{
3417 struct op_context *op_ctx = ctx->cur_op_ctx;
3418 uacpi_namespace_node *node;
3419 uacpi_object *dst;
3420
3421 node = item_array_at(arr: &op_ctx->items, idx: 0)->node;
3422
3423 if (op_ctx->op->code == UACPI_AML_OP_MutexOp) {
3424 dst = item_array_at(arr: &op_ctx->items, idx: 2)->obj;
3425
3426 // bits 0-3: SyncLevel (0x00-0x0f), bits 4-7: Reserved (must be 0)
3427 dst->mutex->sync_level = item_array_at(arr: &op_ctx->items, idx: 1)->immediate;
3428 dst->mutex->sync_level &= 0b1111;
3429 } else {
3430 dst = item_array_at(arr: &op_ctx->items, idx: 1)->obj;
3431 }
3432
3433 node->object = uacpi_create_internal_reference(
3434 kind: UACPI_REFERENCE_KIND_NAMED,
3435 child: dst
3436 );
3437 if (uacpi_unlikely(node->object == UACPI_NULL))
3438 return UACPI_STATUS_OUT_OF_MEMORY;
3439
3440 return UACPI_STATUS_OK;
3441}
3442
/*
 * Signal/Reset/Wait on an Event object (item [0], unwrapped from its
 * named reference). Wait additionally takes a millisecond timeout at
 * item [1] and writes its inverted result into item [2].
 */
static uacpi_status handle_event_ctl(struct execution_context *ctx)
{
    struct op_context *op_ctx = ctx->cur_op_ctx;
    uacpi_object *obj;

    obj = uacpi_unwrap_internal_reference(
        object: item_array_at(arr: &op_ctx->items, idx: 0)->obj
    );
    if (uacpi_unlikely(obj->type != UACPI_OBJECT_EVENT)) {
        uacpi_error(
            "%s: Invalid argument '%s', expected an Event object\n",
            op_ctx->op->name, uacpi_object_type_to_string(obj->type)
        );
        return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
    }

    switch (op_ctx->op->code)
    {
    case UACPI_AML_OP_SignalOp:
        uacpi_kernel_signal_event(obj->event->handle);
        break;
    case UACPI_AML_OP_ResetOp:
        uacpi_kernel_reset_event(obj->event->handle);
        break;
    case UACPI_AML_OP_WaitOp: {
        uacpi_u64 timeout;
        uacpi_bool ret;

        // Clamp to the max value representable by the AML Wait() timeout
        timeout = item_array_at(arr: &op_ctx->items, idx: 1)->obj->integer;
        if (timeout > 0xFFFF)
            timeout = 0xFFFF;

        ret = uacpi_kernel_wait_for_event(obj->event->handle, timeout);

        /*
         * The return value here is inverted, we return 0 for success and Ones
         * for timeout and everything else.
         */
        if (ret)
            item_array_at(arr: &op_ctx->items, idx: 2)->obj->integer = 0;
        break;
    }
    default:
        return UACPI_STATUS_INVALID_ARGUMENT;
    }

    return UACPI_STATUS_OK;
}
3491
/*
 * Acquire(Mutex, Timeout) / Release(Mutex)
 *
 * Acquire writes 0 into the result object (item [2]) on success and
 * leaves it untouched otherwise (NOTE(review): the non-zero/timeout value
 * appears to be pre-set elsewhere — not visible from this function).
 * Successful acquires are recorded in ctx->held_mutexes so that the
 * context's sync level can be restored on Release.
 */
static uacpi_status handle_mutex_ctl(struct execution_context *ctx)
{
    struct op_context *op_ctx = ctx->cur_op_ctx;
    uacpi_object *obj;

    obj = uacpi_unwrap_internal_reference(
        object: item_array_at(arr: &op_ctx->items, idx: 0)->obj
    );
    if (uacpi_unlikely(obj->type != UACPI_OBJECT_MUTEX)) {
        uacpi_error(
            "%s: invalid argument '%s', expected a Mutex object\n",
            op_ctx->op->name, uacpi_object_type_to_string(obj->type)
        );
        return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
    }

    switch (op_ctx->op->code)
    {
    case UACPI_AML_OP_AcquireOp: {
        uacpi_u64 timeout;
        uacpi_u64 *return_value;
        uacpi_status ret;

        return_value = &item_array_at(arr: &op_ctx->items, idx: 2)->obj->integer;

        // Acquiring a mutex with a sync level below the current one is illegal
        if (uacpi_unlikely(ctx->sync_level > obj->mutex->sync_level)) {
            uacpi_warn(
                "ignoring attempt to acquire mutex @%p with a lower sync level "
                "(%d < %d)\n", obj->mutex, obj->mutex->sync_level,
                ctx->sync_level
            );
            break;
        }

        // Clamp to the max value representable by the AML Acquire() timeout
        timeout = item_array_at(arr: &op_ctx->items, idx: 1)->immediate;
        if (timeout > 0xFFFF)
            timeout = 0xFFFF;

        // Recursive acquire: don't track it again, just report success/failure
        if (uacpi_this_thread_owns_aml_mutex(obj->mutex)) {
            if (uacpi_likely(uacpi_acquire_aml_mutex(obj->mutex, timeout)))
                *return_value = 0;
            break;
        }

        if (!uacpi_acquire_aml_mutex(obj->mutex, timeout))
            break;

        ret = held_mutexes_array_push(arr: &ctx->held_mutexes, mutex: obj->mutex);
        if (uacpi_unlikely_error(ret)) {
            // Couldn't record the acquisition, undo it to stay consistent
            uacpi_release_aml_mutex(obj->mutex);
            return ret;
        }

        ctx->sync_level = obj->mutex->sync_level;
        *return_value = 0;
        break;
    }

    case UACPI_AML_OP_ReleaseOp: {
        uacpi_status ret;

        if (!uacpi_this_thread_owns_aml_mutex(obj->mutex)) {
            uacpi_warn(
                "attempted to release not-previously-acquired mutex object "
                "@%p (%p)\n", obj->mutex, obj->mutex->handle
            );
            break;
        }

        ret = held_mutexes_array_remove_and_release(
            arr: &ctx->held_mutexes, mutex: obj->mutex,
            force: FORCE_RELEASE_NO
        );
        if (uacpi_likely_success(ret)) {
            uacpi_mutex **last_mutex;

            // Restore the sync level of the most recently held mutex, if any
            last_mutex = held_mutexes_array_last(arr: &ctx->held_mutexes);
            if (last_mutex == UACPI_NULL) {
                ctx->sync_level = 0;
                break;
            }

            ctx->sync_level = (*last_mutex)->sync_level;
        }
        break;
    }

    default:
        return UACPI_STATUS_INVALID_ARGUMENT;
    }

    return UACPI_STATUS_OK;
}
3585
/*
 * Notify(Object, Value)
 *
 * Dispatches the notification to all registered handlers for the node.
 * A missing handler is downgraded to a warning (the firmware can't do
 * anything about it), an invalid notify target becomes an AML error.
 */
static uacpi_status handle_notify(struct execution_context *ctx)
{
    uacpi_status ret;
    struct op_context *op_ctx = ctx->cur_op_ctx;
    struct uacpi_namespace_node *node;
    uacpi_u64 value;

    node = item_array_at(arr: &op_ctx->items, idx: 0)->node;
    value = item_array_at(arr: &op_ctx->items, idx: 1)->obj->integer;

    ret = uacpi_notify_all(node, value);
    if (uacpi_likely_success(ret))
        return ret;

    if (ret == UACPI_STATUS_NO_HANDLER) {
        const uacpi_char *path;

        path = uacpi_namespace_node_generate_absolute_path(node);
        uacpi_warn(
            "ignoring firmware Notify(%s, 0x%"UACPI_PRIX64") request, "
            "no listeners\n", path, UACPI_FMT64(value)
        );
        uacpi_free_dynamic_string(str: path);

        return UACPI_STATUS_OK;
    }

    if (ret == UACPI_STATUS_INVALID_ARGUMENT) {
        uacpi_error("Notify() called on an invalid object %.4s\n",
                    node->name.text);
        return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
    }

    return ret;
}
3621
3622static uacpi_status handle_firmware_request(struct execution_context *ctx)
3623{
3624 struct op_context *op_ctx = ctx->cur_op_ctx;
3625 uacpi_firmware_request req = { 0 };
3626
3627 switch (op_ctx->op->code) {
3628 case UACPI_AML_OP_BreakPointOp:
3629 req.type = UACPI_FIRMWARE_REQUEST_TYPE_BREAKPOINT;
3630 req.breakpoint.ctx = ctx;
3631 break;
3632 case UACPI_AML_OP_FatalOp:
3633 req.type = UACPI_FIRMWARE_REQUEST_TYPE_FATAL;
3634 req.fatal.type = item_array_at(arr: &op_ctx->items, idx: 0)->immediate;
3635 req.fatal.code = item_array_at(arr: &op_ctx->items, idx: 1)->immediate;
3636 req.fatal.arg = item_array_at(arr: &op_ctx->items, idx: 2)->obj->integer;
3637 break;
3638 default:
3639 return UACPI_STATUS_INVALID_ARGUMENT;
3640 }
3641
3642 uacpi_kernel_handle_firmware_request(&req);
3643 return UACPI_STATUS_OK;
3644}
3645
3646static uacpi_status handle_create_named(struct execution_context *ctx)
3647{
3648 struct op_context *op_ctx = ctx->cur_op_ctx;
3649 struct uacpi_namespace_node *node;
3650 uacpi_object *src;
3651
3652 node = item_array_at(arr: &op_ctx->items, idx: 0)->node;
3653 src = item_array_at(arr: &op_ctx->items, idx: 1)->obj;
3654
3655 node->object = uacpi_create_internal_reference(kind: UACPI_REFERENCE_KIND_NAMED,
3656 child: src);
3657 if (uacpi_unlikely(node->object == UACPI_NULL))
3658 return UACPI_STATUS_OUT_OF_MEMORY;
3659
3660 return UACPI_STATUS_OK;
3661}
3662
3663static uacpi_object_type buffer_field_get_read_type(
3664 struct uacpi_buffer_field *field
3665)
3666{
3667 if (field->bit_length > (g_uacpi_rt_ctx.is_rev1 ? 32u : 64u) ||
3668 field->force_buffer)
3669 return UACPI_OBJECT_BUFFER;
3670
3671 return UACPI_OBJECT_INTEGER;
3672}
3673
3674static uacpi_object_type field_unit_get_read_type(
3675 struct uacpi_field_unit *field
3676)
3677{
3678 if (field->bit_length > (g_uacpi_rt_ctx.is_rev1 ? 32u : 64u))
3679 return UACPI_OBJECT_BUFFER;
3680
3681 return UACPI_OBJECT_INTEGER;
3682}
3683
3684static uacpi_object_type field_get_read_type(uacpi_object *obj)
3685{
3686 if (obj->type == UACPI_OBJECT_BUFFER_FIELD)
3687 return buffer_field_get_read_type(field: &obj->buffer_field);
3688
3689 return field_unit_get_read_type(field: obj->field_unit);
3690}
3691
/*
 * Read a buffer field or field unit (node at item [0]) into the result
 * object (item [1]). Depending on the field's read type, the destination
 * is either a freshly allocated zeroed buffer or the object's integer.
 */
static uacpi_status handle_field_read(struct execution_context *ctx)
{
    struct op_context *op_ctx = ctx->cur_op_ctx;
    struct uacpi_namespace_node *node;
    uacpi_object *src_obj, *dst_obj;
    uacpi_size dst_size;
    void *dst;

    node = item_array_at(arr: &op_ctx->items, idx: 0)->node;
    src_obj = uacpi_namespace_node_get_object(node);
    dst_obj = item_array_at(arr: &op_ctx->items, idx: 1)->obj;

    if (field_get_read_type(obj: src_obj) == UACPI_OBJECT_BUFFER) {
        uacpi_buffer *buf;

        buf = dst_obj->buffer;
        dst_size = field_byte_size(obj: src_obj);

        // Zero-filled so any bits past the field's width read back as 0
        dst = uacpi_kernel_calloc(count: dst_size, size: 1);
        if (dst == UACPI_NULL)
            return UACPI_STATUS_OUT_OF_MEMORY;

        buf->data = dst;
        buf->size = dst_size;
    } else {
        dst = &dst_obj->integer;
        dst_size = sizeof(uacpi_u64);
    }

    if (src_obj->type == UACPI_OBJECT_BUFFER_FIELD) {
        uacpi_read_buffer_field(field: &src_obj->buffer_field, dst);
        return UACPI_STATUS_OK;
    }

    return uacpi_read_field_unit(field: src_obj->field_unit, dst, size: dst_size);
}
3728
3729static uacpi_status handle_create_buffer_field(struct execution_context *ctx)
3730{
3731 struct op_context *op_ctx = ctx->cur_op_ctx;
3732 struct uacpi_namespace_node *node;
3733 uacpi_buffer *src_buf;
3734 uacpi_object *field_obj;
3735 uacpi_buffer_field *field;
3736
3737 /*
3738 * Layout of items here:
3739 * [0] -> Type checked source buffer object
3740 * [1] -> Byte/bit index integer object
3741 * [2] ( if CreateField) -> bit length integer object
3742 * [3] (2 if not CreateField) -> the new namespace node
3743 * [4] (3 if not CreateField) -> the buffer field object we're creating here
3744 */
3745 src_buf = item_array_at(arr: &op_ctx->items, idx: 0)->obj->buffer;
3746
3747 if (op_ctx->op->code == UACPI_AML_OP_CreateFieldOp) {
3748 uacpi_object *idx_obj, *len_obj;
3749
3750 idx_obj = item_array_at(arr: &op_ctx->items, idx: 1)->obj;
3751 len_obj = item_array_at(arr: &op_ctx->items, idx: 2)->obj;
3752 node = item_array_at(arr: &op_ctx->items, idx: 3)->node;
3753 field_obj = item_array_at(arr: &op_ctx->items, idx: 4)->obj;
3754 field = &field_obj->buffer_field;
3755
3756 field->bit_index = idx_obj->integer;
3757
3758 if (uacpi_unlikely(!len_obj->integer ||
3759 len_obj->integer > 0xFFFFFFFF)) {
3760 uacpi_error("invalid bit field length (%u)\n", field->bit_length);
3761 return UACPI_STATUS_AML_BAD_ENCODING;
3762 }
3763
3764 field->bit_length = len_obj->integer;
3765 field->force_buffer = UACPI_TRUE;
3766 } else {
3767 uacpi_object *idx_obj;
3768
3769 idx_obj = item_array_at(arr: &op_ctx->items, idx: 1)->obj;
3770 node = item_array_at(arr: &op_ctx->items, idx: 2)->node;
3771 field_obj = item_array_at(arr: &op_ctx->items, idx: 3)->obj;
3772 field = &field_obj->buffer_field;
3773
3774 field->bit_index = idx_obj->integer;
3775 switch (op_ctx->op->code) {
3776 case UACPI_AML_OP_CreateBitFieldOp:
3777 field->bit_length = 1;
3778 break;
3779 case UACPI_AML_OP_CreateByteFieldOp:
3780 field->bit_length = 8;
3781 break;
3782 case UACPI_AML_OP_CreateWordFieldOp:
3783 field->bit_length = 16;
3784 break;
3785 case UACPI_AML_OP_CreateDWordFieldOp:
3786 field->bit_length = 32;
3787 break;
3788 case UACPI_AML_OP_CreateQWordFieldOp:
3789 field->bit_length = 64;
3790 break;
3791 default:
3792 return UACPI_STATUS_INVALID_ARGUMENT;
3793 }
3794
3795 if (op_ctx->op->code != UACPI_AML_OP_CreateBitFieldOp)
3796 field->bit_index *= 8;
3797 }
3798
3799 if (uacpi_unlikely((field->bit_index + field->bit_length) >
3800 src_buf->size * 8)) {
3801 uacpi_error(
3802 "Invalid buffer field: bits [%zu..%zu], buffer size is %zu bytes\n",
3803 field->bit_index, field->bit_index + field->bit_length,
3804 src_buf->size
3805 );
3806 return UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX;
3807 }
3808
3809 field->backing = src_buf;
3810 uacpi_shareable_ref(field->backing);
3811 node->object = uacpi_create_internal_reference(kind: UACPI_REFERENCE_KIND_NAMED,
3812 child: field_obj);
3813 if (uacpi_unlikely(node->object == UACPI_NULL))
3814 return UACPI_STATUS_OUT_OF_MEMORY;
3815
3816 return UACPI_STATUS_OK;
3817}
3818
/*
 * Break / Continue
 *
 * Unwinds every code block up to (and including) the innermost While:
 * Break jumps to the end of the While's PkgLength, Continue jumps back
 * to its beginning so the predicate is re-evaluated.
 */
static uacpi_status handle_control_flow(struct execution_context *ctx)
{
    struct call_frame *frame = ctx->cur_frame;
    struct op_context *op_ctx = ctx->cur_op_ctx;

    if (uacpi_unlikely(frame->last_while == UACPI_NULL)) {
        uacpi_error(
            "attempting to %s outside of a While block\n",
            op_ctx->op->code == UACPI_AML_OP_BreakOp ? "Break" : "Continue"
        );
        return UACPI_STATUS_AML_BAD_ENCODING;
    }

    for (;;) {
        // Pop intermediate blocks (If/Else/Scope) until we reach the While
        if (ctx->cur_block != frame->last_while) {
            frame_reset_post_end_block(ctx, type: ctx->cur_block->type);
            continue;
        }

        if (op_ctx->op->code == UACPI_AML_OP_BreakOp)
            frame->code_offset = ctx->cur_block->end;
        else
            frame->code_offset = ctx->cur_block->begin;
        frame_reset_post_end_block(ctx, type: ctx->cur_block->type);
        break;
    }

    return UACPI_STATUS_OK;
}
3848
/*
 * Processor / PowerResource / ThermalZone / Device
 *
 * Item layout: [1] is the namespace node, the last item is the scope's
 * backing object. Processor and PowerResource carry extra immediate
 * operands at [2..] that are copied into the object before binding.
 */
static uacpi_status create_named_scope(struct op_context *op_ctx)
{
    uacpi_namespace_node *node;
    uacpi_object *obj;

    node = item_array_at(arr: &op_ctx->items, idx: 1)->node;
    obj = item_array_last(arr: &op_ctx->items)->obj;

    switch (op_ctx->op->code) {
    case UACPI_AML_OP_ProcessorOp: {
        uacpi_processor *proc = obj->processor;
        // Processor(Name, ProcID, PblkAddr, PblkLen)
        proc->id = item_array_at(arr: &op_ctx->items, idx: 2)->immediate;
        proc->block_address = item_array_at(arr: &op_ctx->items, idx: 3)->immediate;
        proc->block_length = item_array_at(arr: &op_ctx->items, idx: 4)->immediate;
        break;
    }

    case UACPI_AML_OP_PowerResOp: {
        uacpi_power_resource *power_res = &obj->power_resource;
        // PowerResource(Name, SystemLevel, ResourceOrder)
        power_res->system_level = item_array_at(arr: &op_ctx->items, idx: 2)->immediate;
        power_res->resource_order = item_array_at(arr: &op_ctx->items, idx: 3)->immediate;
        break;
    }

    default:
        break;
    }

    node->object = uacpi_create_internal_reference(kind: UACPI_REFERENCE_KIND_NAMED,
                                                   child: obj);
    if (uacpi_unlikely(node->object == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    return UACPI_STATUS_OK;
}
3884
/*
 * Decide whether to enter a block-style op or skip its body by jumping
 * straight to the end of its PkgLength (item [0]).
 *
 * Else is skipped when the preceding If's body ran; If/While are skipped
 * when the predicate (item [1]) evaluated to zero; named scopes are
 * created first and then always entered.
 */
static uacpi_status handle_code_block(struct execution_context *ctx)
{
    struct op_context *op_ctx = ctx->cur_op_ctx;
    struct package_length *pkg;
    uacpi_bool skip_block;

    pkg = &item_array_at(arr: &op_ctx->items, idx: 0)->pkg;

    switch (op_ctx->op->code) {
    case UACPI_AML_OP_ElseOp:
        skip_block = ctx->skip_else;
        break;
    case UACPI_AML_OP_ProcessorOp:
    case UACPI_AML_OP_PowerResOp:
    case UACPI_AML_OP_ThermalZoneOp:
    case UACPI_AML_OP_DeviceOp: {
        uacpi_status ret;

        ret = create_named_scope(op_ctx);
        if (uacpi_unlikely_error(ret))
            return ret;

        // Named scopes behave like Scope once created, hence the fallthrough
        UACPI_FALLTHROUGH;
    }
    case UACPI_AML_OP_ScopeOp:
        skip_block = UACPI_FALSE;
        break;
    case UACPI_AML_OP_IfOp:
    case UACPI_AML_OP_WhileOp: {
        uacpi_object *operand;

        operand = item_array_at(arr: &op_ctx->items, idx: 1)->obj;
        skip_block = operand->integer == 0;
        break;
    }
    default:
        return UACPI_STATUS_INVALID_ARGUMENT;
    }

    if (skip_block) {
        ctx->cur_frame->code_offset = pkg->end;
        return UACPI_STATUS_OK;
    }

    return begin_block_execution(ctx);
}
3931
/*
 * Return(Object)
 *
 * Terminates the current method by jumping past its last byte, then
 * deep-copies the returned object (item [0]) into the caller's return
 * slot, if the caller asked for one.
 */
static uacpi_status handle_return(struct execution_context *ctx)
{
    uacpi_status ret;
    uacpi_object *dst = UACPI_NULL;

    // Setting the offset to method->size makes the frame look finished
    ctx->cur_frame->code_offset = ctx->cur_frame->method->size;
    ret = method_get_ret_object(ctx, out_obj: &dst);

    if (uacpi_unlikely_error(ret))
        return ret;
    // No one cares about the returned object, nothing to copy
    if (dst == UACPI_NULL)
        return UACPI_STATUS_OK;

    /*
     * Should be possible to move here if method returns a literal
     * like Return(Buffer { ... }), otherwise we have to copy just to
     * be safe.
     */
    return uacpi_object_assign(
        dst,
        src: item_array_at(arr: &ctx->cur_op_ctx->items, idx: 0)->obj,
        UACPI_ASSIGN_BEHAVIOR_DEEP_COPY
    );
}
3956
3957static void refresh_ctx_pointers(struct execution_context *ctx)
3958{
3959 struct call_frame *frame = ctx->cur_frame;
3960
3961 if (frame == UACPI_NULL) {
3962 ctx->cur_op_ctx = UACPI_NULL;
3963 ctx->prev_op_ctx = UACPI_NULL;
3964 ctx->cur_block = UACPI_NULL;
3965 return;
3966 }
3967
3968 ctx->cur_op_ctx = op_context_array_last(arr: &frame->pending_ops);
3969 ctx->prev_op_ctx = op_context_array_one_before_last(arr: &frame->pending_ops);
3970 ctx->cur_block = code_block_array_last(arr: &frame->code_blocks);
3971}
3972
3973static uacpi_bool ctx_has_non_preempted_op(struct execution_context *ctx)
3974{
3975 return ctx->cur_op_ctx && !ctx->cur_op_ctx->preempted;
3976}
3977
// Lifecycle stages an op goes through in the interpreter loop
enum op_trace_action_type {
    OP_TRACE_ACTION_BEGIN,
    OP_TRACE_ACTION_RESUME,
    OP_TRACE_ACTION_END,
};

// Human-readable stage names, indexed by enum op_trace_action_type
static const uacpi_char *const op_trace_action_types[3] = {
    [OP_TRACE_ACTION_BEGIN] = "BEGIN",
    [OP_TRACE_ACTION_RESUME] = "RESUME",
    [OP_TRACE_ACTION_END] = "END",
};
3989
3990static inline void trace_op(
3991 const struct uacpi_op_spec *op, enum op_trace_action_type action
3992)
3993{
3994 uacpi_debug(
3995 "%s OP '%s' (0x%04X)\n",
3996 op_trace_action_types[action], op->name, op->code
3997 );
3998}
3999
// Emit a debug line for a single parse (micro-)op being executed
static inline void trace_pop(uacpi_u8 pop)
{
    uacpi_debug(" pOP: %s (0x%02X)\n", uacpi_parse_op_to_string(pop), pop);
}
4004
/*
 * Wrap every evaluated method-call argument into an ARG internal
 * reference and install it into the callee frame's args array.
 */
static uacpi_status frame_push_args(struct call_frame *frame,
                                    struct op_context *op_ctx)
{
    uacpi_size i;

    /*
     * MethodCall items:
     * items[0] -> method namespace node
     * items[1] -> immediate that was used for parsing the arguments
     * items[2...nargs-1] -> method arguments
     * items[-1] -> return value object
     *
     * Here we only care about the arguments though.
     */
    for (i = 2; i < item_array_size(arr: &op_ctx->items) - 1; i++) {
        uacpi_object *src, *dst;

        src = item_array_at(arr: &op_ctx->items, idx: i)->obj;

        // The reference holds its own ref on src, Arg0 maps to items[2]
        dst = uacpi_create_internal_reference(kind: UACPI_REFERENCE_KIND_ARG, child: src);
        if (uacpi_unlikely(dst == UACPI_NULL))
            return UACPI_STATUS_OUT_OF_MEMORY;

        frame->args[i - 2] = dst;
    }

    return UACPI_STATUS_OK;
}
4033
4034static uacpi_status frame_setup_base_scope(struct call_frame *frame,
4035 uacpi_namespace_node *scope,
4036 uacpi_control_method *method)
4037{
4038 struct code_block *block;
4039
4040 block = code_block_array_alloc(arr: &frame->code_blocks);
4041 if (uacpi_unlikely(block == UACPI_NULL))
4042 return UACPI_STATUS_OUT_OF_MEMORY;
4043
4044 block->type = CODE_BLOCK_SCOPE;
4045 block->node = scope;
4046 block->begin = 0;
4047 block->end = method->size;
4048 frame->method = method;
4049 frame->cur_scope = scope;
4050 return UACPI_STATUS_OK;
4051}
4052
/*
 * Append a zero-initialized call frame to the call stack and hand it back
 * via out_frame. Also refreshes all cached pointers into the (possibly
 * reallocated) stack storage.
 */
static uacpi_status push_new_frame(struct execution_context *ctx,
                                   struct call_frame **out_frame)
{
    struct call_frame_array *call_stack = &ctx->call_stack;
    struct call_frame *prev_frame;

    *out_frame = call_frame_array_calloc(arr: call_stack);
    if (uacpi_unlikely(*out_frame == UACPI_NULL))
        return UACPI_STATUS_OUT_OF_MEMORY;

    /*
     * Allocating a new frame might have reallocated the dynamic buffer so our
     * execution_context members might now be pointing to freed memory.
     * Refresh them here.
     */
    prev_frame = call_frame_array_one_before_last(arr: call_stack);
    ctx->cur_frame = prev_frame;
    refresh_ctx_pointers(ctx);

    return UACPI_STATUS_OK;
}
4074
4075static uacpi_bool maybe_end_block(struct execution_context *ctx)
4076{
4077 struct code_block *block = ctx->cur_block;
4078 struct call_frame *cur_frame = ctx->cur_frame;
4079
4080 if (!block)
4081 return UACPI_FALSE;
4082 if (cur_frame->code_offset != block->end)
4083 return UACPI_FALSE;
4084
4085 ctx->skip_else = UACPI_FALSE;
4086
4087 if (block->type == CODE_BLOCK_WHILE) {
4088 cur_frame->code_offset = block->begin;
4089 } else if (block->type == CODE_BLOCK_IF) {
4090 ctx->skip_else = UACPI_TRUE;
4091 }
4092
4093 frame_reset_post_end_block(ctx, type: block->type);
4094 return UACPI_TRUE;
4095}
4096
/*
 * Store src into an evaluated Target operand: the Debug object, a
 * reference (Arg/Local/Named), a buffer index, or the NULL target
 * (integer constant zero), which silently discards the value.
 */
static uacpi_status store_to_target(uacpi_object *dst, uacpi_object *src)
{
    uacpi_status ret;

    switch (dst->type) {
    case UACPI_OBJECT_DEBUG:
        ret = debug_store(src);
        break;
    case UACPI_OBJECT_REFERENCE:
        ret = store_to_reference(dst, src);
        break;

    case UACPI_OBJECT_BUFFER_INDEX:
        src = uacpi_unwrap_internal_reference(object: src);
        ret = object_assign_with_implicit_cast(dst, src);
        break;

    case UACPI_OBJECT_INTEGER:
        // NULL target
        if (dst->integer == 0) {
            ret = UACPI_STATUS_OK;
            break;
        }
        // A non-zero integer target is just as invalid as any other type
        UACPI_FALLTHROUGH;
    default:
        uacpi_error("attempted to store to an invalid target: %s\n",
                    uacpi_object_type_to_string(dst->type));
        ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
    }

    return ret;
}
4129
4130static uacpi_status handle_copy_object_or_store(struct execution_context *ctx)
4131{
4132 uacpi_object *src, *dst;
4133 struct op_context *op_ctx = ctx->cur_op_ctx;
4134
4135 src = item_array_at(arr: &op_ctx->items, idx: 0)->obj;
4136 dst = item_array_at(arr: &op_ctx->items, idx: 1)->obj;
4137
4138 if (op_ctx->op->code == UACPI_AML_OP_StoreOp)
4139 return store_to_target(dst, src);
4140
4141 if (dst->type != UACPI_OBJECT_REFERENCE)
4142 return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
4143
4144 return copy_object_to_reference(dst, src);
4145}
4146
/*
 * Increment(Operand) / Decrement(Operand)
 *
 * Reads an integer out of the operand (item [0]) with restricted
 * dereference rules (see below), adds or subtracts one, and writes the
 * result into the result object (item [1]).
 */
static uacpi_status handle_inc_dec(struct execution_context *ctx)
{
    uacpi_object *src, *dst;
    struct op_context *op_ctx = ctx->cur_op_ctx;
    uacpi_bool field_allowed = UACPI_FALSE;
    uacpi_object_type true_src_type;
    uacpi_status ret;

    src = item_array_at(arr: &op_ctx->items, idx: 0)->obj;
    dst = item_array_at(arr: &op_ctx->items, idx: 1)->obj;

    if (src->type == UACPI_OBJECT_REFERENCE) {
        /*
         * Increment/Decrement are the only two operators that modify the value
         * in-place, thus we need very specific dereference rules here.
         *
         * Reading buffer fields & field units is only allowed if we were passed
         * a namestring directly as opposed to some nested reference chain
         * containing a field at the bottom.
         */
        if (src->flags == UACPI_REFERENCE_KIND_NAMED)
            field_allowed = src->inner_object->type != UACPI_OBJECT_REFERENCE;

        src = reference_unwind(obj: src)->inner_object;
    } // else buffer index

    true_src_type = src->type;

    switch (true_src_type) {
    case UACPI_OBJECT_INTEGER:
        dst->integer = src->integer;
        break;
    case UACPI_OBJECT_FIELD_UNIT:
    case UACPI_OBJECT_BUFFER_FIELD:
        if (uacpi_unlikely(!field_allowed))
            goto out_bad_type;

        // Only fields narrow enough to read back as an integer are valid here
        true_src_type = field_get_read_type(obj: src);
        if (true_src_type != UACPI_OBJECT_INTEGER)
            goto out_bad_type;

        if (src->type == UACPI_OBJECT_FIELD_UNIT) {
            ret = uacpi_read_field_unit(
                field: src->field_unit, dst: &dst->integer, size: sizeof_int()
            );
            if (uacpi_unlikely_error(ret))
                return ret;
        } else {
            uacpi_read_buffer_field(field: &src->buffer_field, dst: &dst->integer);
        }
        break;
    case UACPI_OBJECT_BUFFER_INDEX:
        dst->integer = *buffer_index_cursor(buf_idx: &src->buffer_index);
        break;
    default:
        goto out_bad_type;
    }

    if (op_ctx->op->code == UACPI_AML_OP_IncrementOp)
        dst->integer++;
    else
        dst->integer--;

    return UACPI_STATUS_OK;

out_bad_type:
    uacpi_error("Increment/Decrement: invalid object type '%s'\n",
                uacpi_object_type_to_string(true_src_type));
    return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
}
4217
/*
 * Serialization bookkeeping done before a method starts executing.
 *
 * Non-serialized methods need nothing. For serialized ones: enforce the
 * sync-level ordering rule, lazily create the per-method mutex, acquire
 * it (unless this thread already owns it), record it as held, and raise
 * the context's sync level; the previous level is saved on the frame.
 */
static uacpi_status enter_method(
    struct execution_context *ctx, struct call_frame *new_frame,
    uacpi_control_method *method
)
{
    uacpi_status ret = UACPI_STATUS_OK;

    if (!method->is_serialized)
        return ret;

    if (uacpi_unlikely(ctx->sync_level > method->sync_level)) {
        uacpi_error(
            "cannot invoke method @%p, sync level %d is too low "
            "(current is %d)\n",
            method, method->sync_level, ctx->sync_level
        );
        return UACPI_STATUS_AML_SYNC_LEVEL_TOO_HIGH;
    }

    // The method mutex is created on first invocation, not at parse time
    if (method->mutex == UACPI_NULL) {
        method->mutex = uacpi_create_mutex();
        if (uacpi_unlikely(method->mutex == UACPI_NULL))
            return UACPI_STATUS_OUT_OF_MEMORY;
        method->mutex->sync_level = method->sync_level;
    }

    if (!uacpi_this_thread_owns_aml_mutex(method->mutex)) {
        if (uacpi_unlikely(!uacpi_acquire_aml_mutex(method->mutex, 0xFFFF)))
            return UACPI_STATUS_INTERNAL_ERROR;

        ret = held_mutexes_array_push(arr: &ctx->held_mutexes, mutex: method->mutex);
        if (uacpi_unlikely_error(ret)) {
            // Couldn't record the acquisition, undo it to stay consistent
            uacpi_release_aml_mutex(method->mutex);
            return ret;
        }
    }

    new_frame->prev_sync_level = ctx->sync_level;
    ctx->sync_level = method->sync_level;
    return UACPI_STATUS_OK;
}
4259
4260static uacpi_status push_op(struct execution_context *ctx)
4261{
4262 struct call_frame *frame = ctx->cur_frame;
4263 struct op_context *op_ctx;
4264
4265 op_ctx = op_context_array_calloc(arr: &frame->pending_ops);
4266 if (op_ctx == UACPI_NULL)
4267 return UACPI_STATUS_OUT_OF_MEMORY;
4268
4269 op_ctx->op = ctx->cur_op;
4270 refresh_ctx_pointers(ctx);
4271 return UACPI_STATUS_OK;
4272}
4273
4274static uacpi_bool pop_item(struct op_context *op_ctx)
4275{
4276 struct item *item;
4277
4278 if (item_array_size(arr: &op_ctx->items) == 0)
4279 return UACPI_FALSE;
4280
4281 item = item_array_last(arr: &op_ctx->items);
4282
4283 if (item->type == ITEM_OBJECT)
4284 uacpi_object_unref(obj: item->obj);
4285 if (item->type == ITEM_NAMESPACE_NODE_METHOD_LOCAL)
4286 uacpi_namespace_node_unref(node: item->node);
4287
4288 item_array_pop(arr: &op_ctx->items);
4289 return UACPI_TRUE;
4290}
4291
4292static void pop_op(struct execution_context *ctx)
4293{
4294 struct call_frame *frame = ctx->cur_frame;
4295 struct op_context *cur_op_ctx = ctx->cur_op_ctx;
4296
4297 while (pop_item(op_ctx: cur_op_ctx));
4298
4299 item_array_clear(arr: &cur_op_ctx->items);
4300 op_context_array_pop(arr: &frame->pending_ops);
4301 refresh_ctx_pointers(ctx);
4302}
4303
/*
 * Tear down all per-frame state: pending op contexts, code blocks,
 * method-temporary namespace nodes (uninstalled in reverse creation
 * order), and the references held on Arg/Local objects.
 */
static void call_frame_clear(struct call_frame *frame)
{
    uacpi_size i;
    op_context_array_clear(arr: &frame->pending_ops);
    code_block_array_clear(arr: &frame->code_blocks);

    while (temp_namespace_node_array_size(arr: &frame->temp_nodes) != 0) {
        uacpi_namespace_node *node;

        node = *temp_namespace_node_array_last(arr: &frame->temp_nodes);
        uacpi_node_uninstall(node);
        temp_namespace_node_array_pop(arr: &frame->temp_nodes);
    }
    temp_namespace_node_array_clear(arr: &frame->temp_nodes);

    // AML defines 7 method arguments (Arg0-Arg6)...
    for (i = 0; i < 7; ++i)
        uacpi_object_unref(obj: frame->args[i]);
    // ...and 8 locals (Local0-Local7)
    for (i = 0; i < 8; ++i)
        uacpi_object_unref(obj: frame->locals[i]);
}
4324
/*
 * Maps each parse (micro-)op to the type of item it pushes onto the
 * current op context's item array. Ops absent from the initializer
 * default to ITEM_NONE (0), i.e. they push nothing.
 */
static uacpi_u8 parse_op_generates_item[0x100] = {
    [UACPI_PARSE_OP_SIMPLE_NAME] = ITEM_EMPTY_OBJECT,
    [UACPI_PARSE_OP_SUPERNAME] = ITEM_EMPTY_OBJECT,
    [UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED] = ITEM_EMPTY_OBJECT,
    [UACPI_PARSE_OP_TERM_ARG] = ITEM_EMPTY_OBJECT,
    [UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL] = ITEM_EMPTY_OBJECT,
    [UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT] = ITEM_EMPTY_OBJECT,
    [UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED] = ITEM_EMPTY_OBJECT,
    [UACPI_PARSE_OP_OPERAND] = ITEM_EMPTY_OBJECT,
    [UACPI_PARSE_OP_STRING] = ITEM_EMPTY_OBJECT,
    [UACPI_PARSE_OP_COMPUTATIONAL_DATA] = ITEM_EMPTY_OBJECT,
    [UACPI_PARSE_OP_TARGET] = ITEM_EMPTY_OBJECT,
    [UACPI_PARSE_OP_PKGLEN] = ITEM_PACKAGE_LENGTH,
    [UACPI_PARSE_OP_TRACKED_PKGLEN] = ITEM_PACKAGE_LENGTH,
    [UACPI_PARSE_OP_CREATE_NAMESTRING] = ITEM_NAMESPACE_NODE_METHOD_LOCAL,
    [UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD] = ITEM_NAMESPACE_NODE_METHOD_LOCAL,
    [UACPI_PARSE_OP_EXISTING_NAMESTRING] = ITEM_NAMESPACE_NODE,
    [UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL] = ITEM_NAMESPACE_NODE,
    [UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL_IF_LOAD] = ITEM_NAMESPACE_NODE,
    [UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT] = ITEM_OBJECT,
    [UACPI_PARSE_OP_LOAD_INLINE_IMM] = ITEM_IMMEDIATE,
    [UACPI_PARSE_OP_LOAD_ZERO_IMM] = ITEM_IMMEDIATE,
    [UACPI_PARSE_OP_LOAD_IMM] = ITEM_IMMEDIATE,
    [UACPI_PARSE_OP_LOAD_IMM_AS_OBJECT] = ITEM_OBJECT,
    [UACPI_PARSE_OP_LOAD_FALSE_OBJECT] = ITEM_OBJECT,
    [UACPI_PARSE_OP_LOAD_TRUE_OBJECT] = ITEM_OBJECT,
    [UACPI_PARSE_OP_OBJECT_ALLOC] = ITEM_OBJECT,
    [UACPI_PARSE_OP_OBJECT_ALLOC_TYPED] = ITEM_OBJECT,
    [UACPI_PARSE_OP_EMPTY_OBJECT_ALLOC] = ITEM_EMPTY_OBJECT,
    [UACPI_PARSE_OP_OBJECT_CONVERT_TO_SHALLOW_COPY] = ITEM_OBJECT,
    [UACPI_PARSE_OP_OBJECT_CONVERT_TO_DEEP_COPY] = ITEM_OBJECT,
    [UACPI_PARSE_OP_RECORD_AML_PC] = ITEM_IMMEDIATE,
};
4358
4359static const uacpi_u8 *op_decode_cursor(const struct op_context *ctx)
4360{
4361 const struct uacpi_op_spec *spec = ctx->op;
4362
4363 if (spec->properties & UACPI_OP_PROPERTY_OUT_OF_LINE)
4364 return &spec->indirect_decode_ops[ctx->pc];
4365
4366 return &spec->decode_ops[ctx->pc];
4367}
4368
4369static uacpi_u8 op_decode_byte(struct op_context *ctx)
4370{
4371 uacpi_u8 byte;
4372
4373 byte = *op_decode_cursor(ctx);
4374 ctx->pc++;
4375
4376 return byte;
4377}
4378
// MSVC doesn't support __VA_OPT__ so we do this weirdness
// NOTE: these macros expand a reference to a local named 'op_ctx', so they
// may only be used in scopes where such a variable exists.
#define EXEC_OP_DO_LVL(lvl, reason, ...) \
    uacpi_##lvl("Op 0x%04X ('%s'): "reason"\n", \
                op_ctx->op->code, op_ctx->op->name __VA_ARGS__)

// Error/warn wrappers; the stray leading comma in the argument list of the
// _2/_1 variants below stands in for what __VA_OPT__(,) would produce.
#define EXEC_OP_DO_ERR(reason, ...) EXEC_OP_DO_LVL(error, reason, __VA_ARGS__)
#define EXEC_OP_DO_WARN(reason, ...) EXEC_OP_DO_LVL(warn, reason, __VA_ARGS__)

#define EXEC_OP_ERR_2(reason, arg0, arg1) EXEC_OP_DO_ERR(reason, ,arg0, arg1)
#define EXEC_OP_ERR_1(reason, arg0) EXEC_OP_DO_ERR(reason, ,arg0)
#define EXEC_OP_ERR(reason) EXEC_OP_DO_ERR(reason)

#define EXEC_OP_WARN(reason) EXEC_OP_DO_WARN(reason)

// Grammar productions quoted in diagnostics when an argument's actual
// kind/type doesn't match what the current op expects.
#define SPEC_SIMPLE_NAME "SimpleName := NameString | ArgObj | LocalObj"
#define SPEC_SUPER_NAME \
    "SuperName := SimpleName | DebugObj | ReferenceTypeOpcode"
#define SPEC_TERM_ARG \
    "TermArg := ExpressionOpcode | DataObject | ArgObj | LocalObj"
#define SPEC_OPERAND "Operand := TermArg => Integer"
#define SPEC_STRING "String := TermArg => String"
#define SPEC_TARGET "Target := SuperName | NullName"

#define SPEC_COMPUTATIONAL_DATA \
    "ComputationalData := ByteConst | WordConst | DWordConst | QWordConst " \
    "| String | ConstObj | RevisionOp | DefBuffer"
4405
4406static uacpi_bool op_wants_supername(enum uacpi_parse_op op)
4407{
4408 switch (op) {
4409 case UACPI_PARSE_OP_SIMPLE_NAME:
4410 case UACPI_PARSE_OP_SUPERNAME:
4411 case UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED:
4412 case UACPI_PARSE_OP_TARGET:
4413 return UACPI_TRUE;
4414 default:
4415 return UACPI_FALSE;
4416 }
4417}
4418
4419static uacpi_bool op_wants_term_arg_or_operand(enum uacpi_parse_op op)
4420{
4421 switch (op) {
4422 case UACPI_PARSE_OP_TERM_ARG:
4423 case UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL:
4424 case UACPI_PARSE_OP_OPERAND:
4425 case UACPI_PARSE_OP_STRING:
4426 case UACPI_PARSE_OP_COMPUTATIONAL_DATA:
4427 return UACPI_TRUE;
4428 default:
4429 return UACPI_FALSE;
4430 }
4431}
4432
4433static uacpi_bool op_allows_unresolved(enum uacpi_parse_op op)
4434{
4435 switch (op) {
4436 case UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED:
4437 case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED:
4438 case UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL:
4439 return UACPI_TRUE;
4440 default:
4441 return UACPI_FALSE;
4442 }
4443}
4444
4445static uacpi_bool op_allows_unresolved_if_load(enum uacpi_parse_op op)
4446{
4447 switch (op) {
4448 case UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD:
4449 case UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL_IF_LOAD:
4450 return UACPI_TRUE;
4451 default:
4452 return UACPI_FALSE;
4453 }
4454}
4455
4456static uacpi_status op_typecheck(const struct op_context *op_ctx,
4457 const struct op_context *cur_op_ctx)
4458{
4459 const uacpi_char *expected_type_str;
4460 uacpi_u8 ok_mask = 0;
4461 uacpi_u8 props = cur_op_ctx->op->properties;
4462
4463 switch (*op_decode_cursor(ctx: op_ctx)) {
4464 // SimpleName := NameString | ArgObj | LocalObj
4465 case UACPI_PARSE_OP_SIMPLE_NAME:
4466 expected_type_str = SPEC_SIMPLE_NAME;
4467 ok_mask |= UACPI_OP_PROPERTY_SIMPLE_NAME;
4468 break;
4469
4470 // Target := SuperName | NullName
4471 case UACPI_PARSE_OP_TARGET:
4472 expected_type_str = SPEC_TARGET;
4473 ok_mask |= UACPI_OP_PROPERTY_TARGET | UACPI_OP_PROPERTY_SUPERNAME;
4474 break;
4475
4476 // SuperName := SimpleName | DebugObj | ReferenceTypeOpcode
4477 case UACPI_PARSE_OP_SUPERNAME:
4478 case UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED:
4479 expected_type_str = SPEC_SUPER_NAME;
4480 ok_mask |= UACPI_OP_PROPERTY_SUPERNAME;
4481 break;
4482
4483 // TermArg := ExpressionOpcode | DataObject | ArgObj | LocalObj
4484 case UACPI_PARSE_OP_TERM_ARG:
4485 case UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL:
4486 case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT:
4487 case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED:
4488 case UACPI_PARSE_OP_OPERAND:
4489 case UACPI_PARSE_OP_STRING:
4490 case UACPI_PARSE_OP_COMPUTATIONAL_DATA:
4491 expected_type_str = SPEC_TERM_ARG;
4492 ok_mask |= UACPI_OP_PROPERTY_TERM_ARG;
4493 break;
4494 }
4495
4496 if (!(props & ok_mask)) {
4497 EXEC_OP_ERR_2("invalid argument: '%s', expected a %s",
4498 cur_op_ctx->op->name, expected_type_str);
4499 return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
4500 }
4501
4502 return UACPI_STATUS_OK;
4503}
4504
4505static uacpi_status typecheck_obj(
4506 const struct op_context *op_ctx,
4507 const uacpi_object *obj,
4508 enum uacpi_object_type expected_type,
4509 const uacpi_char *spec_desc
4510)
4511{
4512 if (uacpi_likely(obj->type == expected_type))
4513 return UACPI_STATUS_OK;
4514
4515 EXEC_OP_ERR_2("invalid argument type: %s, expected a %s",
4516 uacpi_object_type_to_string(obj->type), spec_desc);
4517 return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
4518}
4519
4520static uacpi_status typecheck_operand(
4521 const struct op_context *op_ctx,
4522 const uacpi_object *obj
4523)
4524{
4525 return typecheck_obj(op_ctx, obj, expected_type: UACPI_OBJECT_INTEGER, SPEC_OPERAND);
4526}
4527
4528static uacpi_status typecheck_string(
4529 const struct op_context *op_ctx,
4530 const uacpi_object *obj
4531)
4532{
4533 return typecheck_obj(op_ctx, obj, expected_type: UACPI_OBJECT_STRING, SPEC_STRING);
4534}
4535
4536static uacpi_status typecheck_computational_data(
4537 const struct op_context *op_ctx,
4538 const uacpi_object *obj
4539)
4540{
4541 switch (obj->type) {
4542 case UACPI_OBJECT_STRING:
4543 case UACPI_OBJECT_BUFFER:
4544 case UACPI_OBJECT_INTEGER:
4545 return UACPI_STATUS_OK;
4546 default:
4547 EXEC_OP_ERR_2(
4548 "invalid argument type: %s, expected a %s",
4549 uacpi_object_type_to_string(obj->type),
4550 SPEC_COMPUTATIONAL_DATA
4551 );
4552 return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
4553 }
4554}
4555
4556static void trace_named_object_lookup_or_creation_failure(
4557 struct call_frame *frame, uacpi_size offset, enum uacpi_parse_op op,
4558 uacpi_status ret, enum uacpi_log_level level
4559)
4560{
4561 static const uacpi_char *oom_prefix = "<...>";
4562 static const uacpi_char *empty_string = "";
4563 static const uacpi_char *unknown_path = "<unknown-path>";
4564 static const uacpi_char *invalid_path = "<invalid-path>";
4565
4566 uacpi_status conv_ret;
4567 const uacpi_char *action;
4568 const uacpi_char *requested_path_to_print;
4569 const uacpi_char *middle_part = UACPI_NULL;
4570 const uacpi_char *prefix_path = UACPI_NULL;
4571 uacpi_char *requested_path = UACPI_NULL;
4572 uacpi_size length;
4573 uacpi_bool is_create;
4574
4575 is_create = op == UACPI_PARSE_OP_CREATE_NAMESTRING ||
4576 op == UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD;
4577
4578 if (is_create)
4579 action = "create";
4580 else
4581 action = "lookup";
4582
4583 conv_ret = name_string_to_path(
4584 frame, offset, out_string: &requested_path, out_size: &length
4585 );
4586 if (uacpi_unlikely_error(conv_ret)) {
4587 if (conv_ret == UACPI_STATUS_OUT_OF_MEMORY)
4588 requested_path_to_print = unknown_path;
4589 else
4590 requested_path_to_print = invalid_path;
4591 } else {
4592 requested_path_to_print = requested_path;
4593 }
4594
4595 if (requested_path && requested_path[0] != '\\') {
4596 prefix_path = uacpi_namespace_node_generate_absolute_path(
4597 node: frame->cur_scope
4598 );
4599 if (uacpi_unlikely(prefix_path == UACPI_NULL))
4600 prefix_path = oom_prefix;
4601
4602 if (prefix_path[1] != '\0')
4603 middle_part = ".";
4604 } else {
4605 prefix_path = empty_string;
4606 }
4607
4608 if (middle_part == UACPI_NULL)
4609 middle_part = empty_string;
4610
4611 if (length == 5 && !is_create) {
4612 uacpi_log_lvl(
4613 level,
4614 "unable to %s named object '%s' within (or above) "
4615 "scope '%s': %s\n", action, requested_path_to_print,
4616 prefix_path, uacpi_status_to_string(ret)
4617 );
4618 } else {
4619 uacpi_log_lvl(
4620 level,
4621 "unable to %s named object '%s%s%s': %s\n",
4622 action, prefix_path, middle_part,
4623 requested_path_to_print, uacpi_status_to_string(ret)
4624 );
4625 }
4626
4627 uacpi_free(requested_path, length);
4628 if (prefix_path != oom_prefix && prefix_path != empty_string)
4629 uacpi_free_dynamic_string(str: prefix_path);
4630}
4631
/*
 * Fallback handler for opcodes that reach UACPI_PARSE_OP_INVOKE_HANDLER
 * without a dedicated entry in the op_handlers table (index 0).
 * The local must be named 'op_ctx' because EXEC_OP_ERR expands to it.
 */
static uacpi_status uninstalled_op_handler(struct execution_context *ctx)
{
    struct op_context *op_ctx = ctx->cur_op_ctx;

    EXEC_OP_ERR("no dedicated handler installed");
    return UACPI_STATUS_UNIMPLEMENTED;
}
4639
/*
 * Indices into the op_handlers function table below. Must stay in sync
 * with op_handlers; OP_HANDLER_UNINSTALLED (0) is the catch-all used for
 * any opcode without a dedicated handler.
 */
enum op_handler {
    OP_HANDLER_UNINSTALLED = 0,
    OP_HANDLER_LOCAL,
    OP_HANDLER_ARG,
    OP_HANDLER_STRING,
    OP_HANDLER_BINARY_MATH,
    OP_HANDLER_CONTROL_FLOW,
    OP_HANDLER_CODE_BLOCK,
    OP_HANDLER_RETURN,
    OP_HANDLER_CREATE_METHOD,
    OP_HANDLER_COPY_OBJECT_OR_STORE,
    OP_HANDLER_INC_DEC,
    OP_HANDLER_REF_OR_DEREF_OF,
    OP_HANDLER_LOGICAL_NOT,
    OP_HANDLER_BINARY_LOGIC,
    OP_HANDLER_NAMED_OBJECT,
    OP_HANDLER_BUFFER,
    OP_HANDLER_PACKAGE,
    OP_HANDLER_CREATE_NAMED,
    OP_HANDLER_CREATE_BUFFER_FIELD,
    OP_HANDLER_READ_FIELD,
    OP_HANDLER_ALIAS,
    OP_HANDLER_CONCATENATE,
    OP_HANDLER_CONCATENATE_RES,
    OP_HANDLER_SIZEOF,
    OP_HANDLER_UNARY_MATH,
    OP_HANDLER_INDEX,
    OP_HANDLER_OBJECT_TYPE,
    OP_HANDLER_CREATE_OP_REGION,
    OP_HANDLER_CREATE_DATA_REGION,
    OP_HANDLER_CREATE_FIELD,
    OP_HANDLER_TO,
    OP_HANDLER_TO_STRING,
    OP_HANDLER_TIMER,
    OP_HANDLER_MID,
    OP_HANDLER_MATCH,
    OP_HANDLER_CREATE_MUTEX_OR_EVENT,
    OP_HANDLER_BCD,
    OP_HANDLER_LOAD_TABLE,
    OP_HANDLER_LOAD,
    OP_HANDLER_STALL_OR_SLEEP,
    OP_HANDLER_EVENT_CTL,
    OP_HANDLER_MUTEX_CTL,
    OP_HANDLER_NOTIFY,
    OP_HANDLER_FIRMWARE_REQUEST,
};
4686
/*
 * Handler function table, indexed by enum op_handler via the
 * handler_idx_of_op / handler_idx_of_ext_op lookup tables below.
 * Designated initializers allow the entries to be listed out of order.
 */
static uacpi_status (*op_handlers[])(struct execution_context *ctx) = {
    /*
     * All OPs that don't have a handler dispatch to here if
     * UACPI_PARSE_OP_INVOKE_HANDLER is reached.
     */
    [OP_HANDLER_UNINSTALLED] = uninstalled_op_handler,
    [OP_HANDLER_LOCAL] = handle_local,
    [OP_HANDLER_ARG] = handle_arg,
    [OP_HANDLER_NAMED_OBJECT] = handle_named_object,
    [OP_HANDLER_STRING] = handle_string,
    [OP_HANDLER_BINARY_MATH] = handle_binary_math,
    [OP_HANDLER_CONTROL_FLOW] = handle_control_flow,
    [OP_HANDLER_CODE_BLOCK] = handle_code_block,
    [OP_HANDLER_RETURN] = handle_return,
    [OP_HANDLER_CREATE_METHOD] = handle_create_method,
    [OP_HANDLER_CREATE_MUTEX_OR_EVENT] = handle_create_mutex_or_event,
    [OP_HANDLER_COPY_OBJECT_OR_STORE] = handle_copy_object_or_store,
    [OP_HANDLER_INC_DEC] = handle_inc_dec,
    [OP_HANDLER_REF_OR_DEREF_OF] = handle_ref_or_deref_of,
    [OP_HANDLER_LOGICAL_NOT] = handle_logical_not,
    [OP_HANDLER_BINARY_LOGIC] = handle_binary_logic,
    [OP_HANDLER_BUFFER] = handle_buffer,
    [OP_HANDLER_PACKAGE] = handle_package,
    [OP_HANDLER_CREATE_NAMED] = handle_create_named,
    [OP_HANDLER_CREATE_BUFFER_FIELD] = handle_create_buffer_field,
    [OP_HANDLER_READ_FIELD] = handle_field_read,
    [OP_HANDLER_TO] = handle_to,
    [OP_HANDLER_ALIAS] = handle_create_alias,
    [OP_HANDLER_CONCATENATE] = handle_concatenate,
    [OP_HANDLER_CONCATENATE_RES] = handle_concatenate_res,
    [OP_HANDLER_SIZEOF] = handle_sizeof,
    [OP_HANDLER_UNARY_MATH] = handle_unary_math,
    [OP_HANDLER_INDEX] = handle_index,
    [OP_HANDLER_OBJECT_TYPE] = handle_object_type,
    [OP_HANDLER_CREATE_OP_REGION] = handle_create_op_region,
    [OP_HANDLER_CREATE_DATA_REGION] = handle_create_data_region,
    [OP_HANDLER_CREATE_FIELD] = handle_create_field,
    [OP_HANDLER_TIMER] = handle_timer,
    [OP_HANDLER_TO_STRING] = handle_to_string,
    [OP_HANDLER_MID] = handle_mid,
    [OP_HANDLER_MATCH] = handle_match,
    [OP_HANDLER_BCD] = handle_bcd,
    [OP_HANDLER_LOAD_TABLE] = handle_load_table,
    [OP_HANDLER_LOAD] = handle_load,
    [OP_HANDLER_STALL_OR_SLEEP] = handle_stall_or_sleep,
    [OP_HANDLER_EVENT_CTL] = handle_event_ctl,
    [OP_HANDLER_MUTEX_CTL] = handle_mutex_ctl,
    [OP_HANDLER_NOTIFY] = handle_notify,
    [OP_HANDLER_FIRMWARE_REQUEST] = handle_firmware_request,
};
4737
/*
 * Maps single-byte (non-extended) AML opcodes to their handler index in
 * op_handlers. Unlisted opcodes default to 0 (OP_HANDLER_UNINSTALLED).
 */
static uacpi_u8 handler_idx_of_op[0x100] = {
    [UACPI_AML_OP_Local0Op] = OP_HANDLER_LOCAL,
    [UACPI_AML_OP_Local1Op] = OP_HANDLER_LOCAL,
    [UACPI_AML_OP_Local2Op] = OP_HANDLER_LOCAL,
    [UACPI_AML_OP_Local3Op] = OP_HANDLER_LOCAL,
    [UACPI_AML_OP_Local4Op] = OP_HANDLER_LOCAL,
    [UACPI_AML_OP_Local5Op] = OP_HANDLER_LOCAL,
    [UACPI_AML_OP_Local6Op] = OP_HANDLER_LOCAL,
    [UACPI_AML_OP_Local7Op] = OP_HANDLER_LOCAL,

    [UACPI_AML_OP_Arg0Op] = OP_HANDLER_ARG,
    [UACPI_AML_OP_Arg1Op] = OP_HANDLER_ARG,
    [UACPI_AML_OP_Arg2Op] = OP_HANDLER_ARG,
    [UACPI_AML_OP_Arg3Op] = OP_HANDLER_ARG,
    [UACPI_AML_OP_Arg4Op] = OP_HANDLER_ARG,
    [UACPI_AML_OP_Arg5Op] = OP_HANDLER_ARG,
    [UACPI_AML_OP_Arg6Op] = OP_HANDLER_ARG,

    [UACPI_AML_OP_StringPrefix] = OP_HANDLER_STRING,

    [UACPI_AML_OP_AddOp] = OP_HANDLER_BINARY_MATH,
    [UACPI_AML_OP_SubtractOp] = OP_HANDLER_BINARY_MATH,
    [UACPI_AML_OP_MultiplyOp] = OP_HANDLER_BINARY_MATH,
    [UACPI_AML_OP_DivideOp] = OP_HANDLER_BINARY_MATH,
    [UACPI_AML_OP_ShiftLeftOp] = OP_HANDLER_BINARY_MATH,
    [UACPI_AML_OP_ShiftRightOp] = OP_HANDLER_BINARY_MATH,
    [UACPI_AML_OP_AndOp] = OP_HANDLER_BINARY_MATH,
    [UACPI_AML_OP_NandOp] = OP_HANDLER_BINARY_MATH,
    [UACPI_AML_OP_OrOp] = OP_HANDLER_BINARY_MATH,
    [UACPI_AML_OP_NorOp] = OP_HANDLER_BINARY_MATH,
    [UACPI_AML_OP_XorOp] = OP_HANDLER_BINARY_MATH,
    [UACPI_AML_OP_ModOp] = OP_HANDLER_BINARY_MATH,

    [UACPI_AML_OP_IfOp] = OP_HANDLER_CODE_BLOCK,
    [UACPI_AML_OP_ElseOp] = OP_HANDLER_CODE_BLOCK,
    [UACPI_AML_OP_WhileOp] = OP_HANDLER_CODE_BLOCK,
    [UACPI_AML_OP_ScopeOp] = OP_HANDLER_CODE_BLOCK,

    [UACPI_AML_OP_ContinueOp] = OP_HANDLER_CONTROL_FLOW,
    [UACPI_AML_OP_BreakOp] = OP_HANDLER_CONTROL_FLOW,

    [UACPI_AML_OP_ReturnOp] = OP_HANDLER_RETURN,

    [UACPI_AML_OP_MethodOp] = OP_HANDLER_CREATE_METHOD,

    [UACPI_AML_OP_StoreOp] = OP_HANDLER_COPY_OBJECT_OR_STORE,
    [UACPI_AML_OP_CopyObjectOp] = OP_HANDLER_COPY_OBJECT_OR_STORE,

    [UACPI_AML_OP_IncrementOp] = OP_HANDLER_INC_DEC,
    [UACPI_AML_OP_DecrementOp] = OP_HANDLER_INC_DEC,

    [UACPI_AML_OP_RefOfOp] = OP_HANDLER_REF_OR_DEREF_OF,
    [UACPI_AML_OP_DerefOfOp] = OP_HANDLER_REF_OR_DEREF_OF,

    [UACPI_AML_OP_LnotOp] = OP_HANDLER_LOGICAL_NOT,

    [UACPI_AML_OP_LEqualOp] = OP_HANDLER_BINARY_LOGIC,
    [UACPI_AML_OP_LandOp] = OP_HANDLER_BINARY_LOGIC,
    [UACPI_AML_OP_LorOp] = OP_HANDLER_BINARY_LOGIC,
    [UACPI_AML_OP_LGreaterOp] = OP_HANDLER_BINARY_LOGIC,
    [UACPI_AML_OP_LLessOp] = OP_HANDLER_BINARY_LOGIC,

    [UACPI_AML_OP_InternalOpNamedObject] = OP_HANDLER_NAMED_OBJECT,

    [UACPI_AML_OP_BufferOp] = OP_HANDLER_BUFFER,

    [UACPI_AML_OP_PackageOp] = OP_HANDLER_PACKAGE,
    [UACPI_AML_OP_VarPackageOp] = OP_HANDLER_PACKAGE,

    [UACPI_AML_OP_NameOp] = OP_HANDLER_CREATE_NAMED,

    [UACPI_AML_OP_CreateBitFieldOp] = OP_HANDLER_CREATE_BUFFER_FIELD,
    [UACPI_AML_OP_CreateByteFieldOp] = OP_HANDLER_CREATE_BUFFER_FIELD,
    [UACPI_AML_OP_CreateWordFieldOp] = OP_HANDLER_CREATE_BUFFER_FIELD,
    [UACPI_AML_OP_CreateDWordFieldOp] = OP_HANDLER_CREATE_BUFFER_FIELD,
    [UACPI_AML_OP_CreateQWordFieldOp] = OP_HANDLER_CREATE_BUFFER_FIELD,

    [UACPI_AML_OP_InternalOpReadFieldAsBuffer] = OP_HANDLER_READ_FIELD,
    [UACPI_AML_OP_InternalOpReadFieldAsInteger] = OP_HANDLER_READ_FIELD,

    [UACPI_AML_OP_ToIntegerOp] = OP_HANDLER_TO,
    [UACPI_AML_OP_ToBufferOp] = OP_HANDLER_TO,
    [UACPI_AML_OP_ToDecimalStringOp] = OP_HANDLER_TO,
    [UACPI_AML_OP_ToHexStringOp] = OP_HANDLER_TO,
    [UACPI_AML_OP_ToStringOp] = OP_HANDLER_TO_STRING,

    [UACPI_AML_OP_AliasOp] = OP_HANDLER_ALIAS,

    [UACPI_AML_OP_ConcatOp] = OP_HANDLER_CONCATENATE,
    [UACPI_AML_OP_ConcatResOp] = OP_HANDLER_CONCATENATE_RES,

    [UACPI_AML_OP_SizeOfOp] = OP_HANDLER_SIZEOF,

    [UACPI_AML_OP_NotOp] = OP_HANDLER_UNARY_MATH,
    [UACPI_AML_OP_FindSetLeftBitOp] = OP_HANDLER_UNARY_MATH,
    [UACPI_AML_OP_FindSetRightBitOp] = OP_HANDLER_UNARY_MATH,

    [UACPI_AML_OP_IndexOp] = OP_HANDLER_INDEX,

    [UACPI_AML_OP_ObjectTypeOp] = OP_HANDLER_OBJECT_TYPE,

    [UACPI_AML_OP_MidOp] = OP_HANDLER_MID,

    [UACPI_AML_OP_MatchOp] = OP_HANDLER_MATCH,

    [UACPI_AML_OP_NotifyOp] = OP_HANDLER_NOTIFY,

    [UACPI_AML_OP_BreakPointOp] = OP_HANDLER_FIRMWARE_REQUEST,
};
4847
// Extended (0x5B-prefixed) opcodes are indexed by their low byte only
#define EXT_OP_IDX(op) (op & 0xFF)

/*
 * Maps extended AML opcodes (low byte) to their handler index in
 * op_handlers. Unlisted opcodes default to 0 (OP_HANDLER_UNINSTALLED).
 */
static uacpi_u8 handler_idx_of_ext_op[0x100] = {
    [EXT_OP_IDX(UACPI_AML_OP_CreateFieldOp)] = OP_HANDLER_CREATE_BUFFER_FIELD,
    [EXT_OP_IDX(UACPI_AML_OP_CondRefOfOp)] = OP_HANDLER_REF_OR_DEREF_OF,
    [EXT_OP_IDX(UACPI_AML_OP_OpRegionOp)] = OP_HANDLER_CREATE_OP_REGION,
    [EXT_OP_IDX(UACPI_AML_OP_DeviceOp)] = OP_HANDLER_CODE_BLOCK,
    [EXT_OP_IDX(UACPI_AML_OP_ProcessorOp)] = OP_HANDLER_CODE_BLOCK,
    [EXT_OP_IDX(UACPI_AML_OP_PowerResOp)] = OP_HANDLER_CODE_BLOCK,
    [EXT_OP_IDX(UACPI_AML_OP_ThermalZoneOp)] = OP_HANDLER_CODE_BLOCK,
    [EXT_OP_IDX(UACPI_AML_OP_TimerOp)] = OP_HANDLER_TIMER,
    [EXT_OP_IDX(UACPI_AML_OP_MutexOp)] = OP_HANDLER_CREATE_MUTEX_OR_EVENT,
    [EXT_OP_IDX(UACPI_AML_OP_EventOp)] = OP_HANDLER_CREATE_MUTEX_OR_EVENT,

    [EXT_OP_IDX(UACPI_AML_OP_FieldOp)] = OP_HANDLER_CREATE_FIELD,
    [EXT_OP_IDX(UACPI_AML_OP_IndexFieldOp)] = OP_HANDLER_CREATE_FIELD,
    [EXT_OP_IDX(UACPI_AML_OP_BankFieldOp)] = OP_HANDLER_CREATE_FIELD,

    [EXT_OP_IDX(UACPI_AML_OP_FromBCDOp)] = OP_HANDLER_BCD,
    [EXT_OP_IDX(UACPI_AML_OP_ToBCDOp)] = OP_HANDLER_BCD,

    [EXT_OP_IDX(UACPI_AML_OP_DataRegionOp)] = OP_HANDLER_CREATE_DATA_REGION,

    [EXT_OP_IDX(UACPI_AML_OP_LoadTableOp)] = OP_HANDLER_LOAD_TABLE,
    [EXT_OP_IDX(UACPI_AML_OP_LoadOp)] = OP_HANDLER_LOAD,

    [EXT_OP_IDX(UACPI_AML_OP_StallOp)] = OP_HANDLER_STALL_OR_SLEEP,
    [EXT_OP_IDX(UACPI_AML_OP_SleepOp)] = OP_HANDLER_STALL_OR_SLEEP,

    [EXT_OP_IDX(UACPI_AML_OP_SignalOp)] = OP_HANDLER_EVENT_CTL,
    [EXT_OP_IDX(UACPI_AML_OP_ResetOp)] = OP_HANDLER_EVENT_CTL,
    [EXT_OP_IDX(UACPI_AML_OP_WaitOp)] = OP_HANDLER_EVENT_CTL,

    [EXT_OP_IDX(UACPI_AML_OP_AcquireOp)] = OP_HANDLER_MUTEX_CTL,
    [EXT_OP_IDX(UACPI_AML_OP_ReleaseOp)] = OP_HANDLER_MUTEX_CTL,

    [EXT_OP_IDX(UACPI_AML_OP_FatalOp)] = OP_HANDLER_FIRMWARE_REQUEST,
};
4886
/*
 * How a method call was initiated; determines how arguments are sourced
 * in prepare_method_call (NATIVE copies from a uacpi_args array, AML
 * pushes them from the current op context, TABLE_LOAD passes none).
 */
enum method_call_type {
    METHOD_CALL_NATIVE,
    METHOD_CALL_AML,
    METHOD_CALL_TABLE_LOAD,
};
4892
4893static uacpi_status prepare_method_call(
4894 struct execution_context *ctx, uacpi_namespace_node *node,
4895 uacpi_control_method *method, enum method_call_type type,
4896 const uacpi_args *args
4897)
4898{
4899 uacpi_status ret;
4900 struct call_frame *frame;
4901
4902 if (uacpi_unlikely(call_frame_array_size(&ctx->call_stack) >=
4903 g_uacpi_rt_ctx.max_call_stack_depth))
4904 return UACPI_STATUS_AML_CALL_STACK_DEPTH_LIMIT;
4905
4906 ret = push_new_frame(ctx, out_frame: &frame);
4907 if (uacpi_unlikely_error(ret))
4908 return ret;
4909
4910 ret = enter_method(ctx, new_frame: frame, method);
4911 if (uacpi_unlikely_error(ret))
4912 goto method_dispatch_error;
4913
4914 if (type == METHOD_CALL_NATIVE) {
4915 uacpi_u8 arg_count;
4916
4917 arg_count = args ? args->count : 0;
4918 if (uacpi_unlikely(arg_count != method->args)) {
4919 uacpi_error(
4920 "invalid number of arguments %zu to call %.4s, expected %d\n",
4921 args ? args->count : 0, node->name.text, method->args
4922 );
4923
4924 ret = UACPI_STATUS_INVALID_ARGUMENT;
4925 goto method_dispatch_error;
4926 }
4927
4928 if (args != UACPI_NULL) {
4929 uacpi_u8 i;
4930
4931 for (i = 0; i < method->args; ++i) {
4932 frame->args[i] = args->objects[i];
4933 uacpi_object_ref(obj: args->objects[i]);
4934 }
4935 }
4936 } else if (type == METHOD_CALL_AML) {
4937 ret = frame_push_args(frame, op_ctx: ctx->cur_op_ctx);
4938 if (uacpi_unlikely_error(ret))
4939 goto method_dispatch_error;
4940 }
4941
4942 ret = frame_setup_base_scope(frame, scope: node, method);
4943 if (uacpi_unlikely_error(ret))
4944 goto method_dispatch_error;
4945
4946 ctx->cur_frame = frame;
4947 ctx->cur_op_ctx = UACPI_NULL;
4948 ctx->prev_op_ctx = UACPI_NULL;
4949 ctx->cur_block = code_block_array_last(arr: &ctx->cur_frame->code_blocks);
4950
4951 if (method->native_call) {
4952 uacpi_object *retval;
4953
4954 ret = method_get_ret_object(ctx, out_obj: &retval);
4955 if (uacpi_unlikely_error(ret))
4956 goto method_dispatch_error;
4957
4958 return method->handler(ctx, retval);
4959 }
4960
4961 return UACPI_STATUS_OK;
4962
4963method_dispatch_error:
4964 call_frame_clear(frame);
4965 call_frame_array_pop(arr: &ctx->call_stack);
4966 return ret;
4967}
4968
4969static uacpi_status exec_op(struct execution_context *ctx)
4970{
4971 uacpi_status ret = UACPI_STATUS_OK;
4972 struct call_frame *frame = ctx->cur_frame;
4973 struct op_context *op_ctx;
4974 struct item *item = UACPI_NULL;
4975 enum uacpi_parse_op prev_op = 0, op;
4976
4977 /*
4978 * Allocate a new op context if previous is preempted (looking for a
4979 * dynamic argument), or doesn't exist at all.
4980 */
4981 if (!ctx_has_non_preempted_op(ctx)) {
4982 ret = push_op(ctx);
4983 if (uacpi_unlikely_error(ret))
4984 return ret;
4985 } else {
4986 trace_op(op: ctx->cur_op_ctx->op, action: OP_TRACE_ACTION_RESUME);
4987 }
4988
4989 if (ctx->prev_op_ctx)
4990 prev_op = *op_decode_cursor(ctx: ctx->prev_op_ctx);
4991
4992 for (;;) {
4993 if (uacpi_unlikely_error(ret))
4994 return ret;
4995
4996 op_ctx = ctx->cur_op_ctx;
4997 frame = ctx->cur_frame;
4998
4999 if (op_ctx->pc == 0 && ctx->prev_op_ctx) {
5000 /*
5001 * Type check the current arg type against what is expected by the
5002 * preempted op. This check is able to catch most type violations
5003 * with the only exception being Operand as we only know whether
5004 * that evaluates to an integer after the fact.
5005 */
5006 ret = op_typecheck(op_ctx: ctx->prev_op_ctx, cur_op_ctx: ctx->cur_op_ctx);
5007 if (uacpi_unlikely_error(ret))
5008 return ret;
5009 }
5010
5011 op = op_decode_byte(ctx: op_ctx);
5012 trace_pop(pop: op);
5013
5014 if (parse_op_generates_item[op] != ITEM_NONE) {
5015 item = item_array_alloc(arr: &op_ctx->items);
5016 if (uacpi_unlikely(item == UACPI_NULL))
5017 return UACPI_STATUS_OUT_OF_MEMORY;
5018
5019 item->type = parse_op_generates_item[op];
5020 if (item->type == ITEM_OBJECT) {
5021 enum uacpi_object_type type = UACPI_OBJECT_UNINITIALIZED;
5022
5023 if (op == UACPI_PARSE_OP_OBJECT_ALLOC_TYPED)
5024 type = op_decode_byte(ctx: op_ctx);
5025
5026 item->obj = uacpi_create_object(type);
5027 if (uacpi_unlikely(item->obj == UACPI_NULL))
5028 return UACPI_STATUS_OUT_OF_MEMORY;
5029 } else {
5030 uacpi_memzero(&item->immediate, sizeof(item->immediate));
5031 }
5032 } else if (item == UACPI_NULL) {
5033 item = item_array_last(arr: &op_ctx->items);
5034 }
5035
5036 switch (op) {
5037 case UACPI_PARSE_OP_END:
5038 case UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL: {
5039 trace_op(op: ctx->cur_op_ctx->op, action: OP_TRACE_ACTION_END);
5040
5041 if (op == UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL) {
5042 uacpi_u8 idx;
5043
5044 idx = op_decode_byte(ctx: op_ctx);
5045 if (item_array_at(arr: &op_ctx->items, idx)->handle != UACPI_NULL)
5046 break;
5047
5048 EXEC_OP_WARN("skipping due to previous errors");
5049 }
5050
5051 if (op_ctx->tracked_pkg_idx) {
5052 item = item_array_at(arr: &op_ctx->items, idx: op_ctx->tracked_pkg_idx - 1);
5053 frame->code_offset = item->pkg.end;
5054 }
5055
5056 pop_op(ctx);
5057 if (ctx->cur_op_ctx) {
5058 ctx->cur_op_ctx->preempted = UACPI_FALSE;
5059 ctx->cur_op_ctx->pc++;
5060 }
5061
5062 return UACPI_STATUS_OK;
5063 }
5064
5065 case UACPI_PARSE_OP_SIMPLE_NAME:
5066 case UACPI_PARSE_OP_SUPERNAME:
5067 case UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED:
5068 case UACPI_PARSE_OP_TERM_ARG:
5069 case UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL:
5070 case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT:
5071 case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED:
5072 case UACPI_PARSE_OP_OPERAND:
5073 case UACPI_PARSE_OP_STRING:
5074 case UACPI_PARSE_OP_COMPUTATIONAL_DATA:
5075 case UACPI_PARSE_OP_TARGET:
5076 /*
5077 * Preempt this op parsing for now as we wait for the dynamic arg
5078 * to be parsed.
5079 */
5080 op_ctx->preempted = UACPI_TRUE;
5081 op_ctx->pc--;
5082 return UACPI_STATUS_OK;
5083
5084 case UACPI_PARSE_OP_TRACKED_PKGLEN:
5085 op_ctx->tracked_pkg_idx = item_array_size(arr: &op_ctx->items);
5086 UACPI_FALLTHROUGH;
5087 case UACPI_PARSE_OP_PKGLEN:
5088 ret = parse_package_length(frame, out_pkg: &item->pkg);
5089 break;
5090
5091 case UACPI_PARSE_OP_LOAD_INLINE_IMM:
5092 case UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT: {
5093 void *dst;
5094 uacpi_u8 src_width;
5095
5096 if (op == UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT) {
5097 item->obj->type = UACPI_OBJECT_INTEGER;
5098 dst = &item->obj->integer;
5099 src_width = 8;
5100 } else {
5101 dst = &item->immediate;
5102 src_width = op_decode_byte(ctx: op_ctx);
5103 }
5104
5105 uacpi_memcpy_zerout(
5106 dst, src: op_decode_cursor(ctx: op_ctx),
5107 dst_size: sizeof(uacpi_u64), src_size: src_width
5108 );
5109 op_ctx->pc += src_width;
5110 break;
5111 }
5112
5113 case UACPI_PARSE_OP_LOAD_ZERO_IMM:
5114 break;
5115
5116 case UACPI_PARSE_OP_LOAD_IMM:
5117 case UACPI_PARSE_OP_LOAD_IMM_AS_OBJECT: {
5118 uacpi_u8 width;
5119 void *dst;
5120
5121 width = op_decode_byte(ctx: op_ctx);
5122 if (uacpi_unlikely(call_frame_code_bytes_left(frame) < width))
5123 return UACPI_STATUS_AML_BAD_ENCODING;
5124
5125 if (op == UACPI_PARSE_OP_LOAD_IMM_AS_OBJECT) {
5126 item->obj->type = UACPI_OBJECT_INTEGER;
5127 item->obj->integer = 0;
5128 dst = &item->obj->integer;
5129 } else {
5130 dst = item->immediate_bytes;
5131 }
5132
5133 uacpi_memcpy(dest: dst, src: call_frame_cursor(frame), count: width);
5134 frame->code_offset += width;
5135 break;
5136 }
5137
5138 case UACPI_PARSE_OP_LOAD_FALSE_OBJECT:
5139 case UACPI_PARSE_OP_LOAD_TRUE_OBJECT: {
5140 uacpi_object *obj = item->obj;
5141 obj->type = UACPI_OBJECT_INTEGER;
5142 obj->integer = op == UACPI_PARSE_OP_LOAD_FALSE_OBJECT ? 0 : ones();
5143 break;
5144 }
5145
5146 case UACPI_PARSE_OP_RECORD_AML_PC:
5147 item->immediate = frame->code_offset;
5148 break;
5149
5150 case UACPI_PARSE_OP_TRUNCATE_NUMBER:
5151 truncate_number_if_needed(obj: item->obj);
5152 break;
5153
5154 case UACPI_PARSE_OP_TYPECHECK: {
5155 enum uacpi_object_type expected_type;
5156
5157 expected_type = op_decode_byte(ctx: op_ctx);
5158
5159 if (uacpi_unlikely(item->obj->type != expected_type)) {
5160 EXEC_OP_ERR_2("bad object type: expected %s, got %s!",
5161 uacpi_object_type_to_string(expected_type),
5162 uacpi_object_type_to_string(item->obj->type));
5163 ret = UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
5164 }
5165
5166 break;
5167 }
5168
5169 case UACPI_PARSE_OP_BAD_OPCODE:
5170 case UACPI_PARSE_OP_UNREACHABLE:
5171 EXEC_OP_ERR("invalid/unexpected opcode");
5172 ret = UACPI_STATUS_AML_INVALID_OPCODE;
5173 break;
5174
5175 case UACPI_PARSE_OP_AML_PC_DECREMENT:
5176 frame->code_offset--;
5177 break;
5178
5179 case UACPI_PARSE_OP_IMM_DECREMENT:
5180 item_array_at(arr: &op_ctx->items, idx: op_decode_byte(ctx: op_ctx))->immediate--;
5181 break;
5182
5183 case UACPI_PARSE_OP_ITEM_POP:
5184 pop_item(op_ctx);
5185 break;
5186
5187 case UACPI_PARSE_OP_IF_HAS_DATA: {
5188 uacpi_size pkg_idx = op_ctx->tracked_pkg_idx - 1;
5189 struct package_length *pkg;
5190 uacpi_u8 bytes_skip;
5191
5192 bytes_skip = op_decode_byte(ctx: op_ctx);
5193 pkg = &item_array_at(arr: &op_ctx->items, idx: pkg_idx)->pkg;
5194
5195 if (frame->code_offset >= pkg->end)
5196 op_ctx->pc += bytes_skip;
5197
5198 break;
5199 }
5200
5201 case UACPI_PARSE_OP_IF_NOT_NULL:
5202 case UACPI_PARSE_OP_IF_NULL: {
5203 uacpi_u8 idx, bytes_skip;
5204 uacpi_bool is_null, skip_if_null;
5205
5206 idx = op_decode_byte(ctx: op_ctx);
5207 bytes_skip = op_decode_byte(ctx: op_ctx);
5208
5209 is_null = item_array_at(arr: &op_ctx->items, idx)->handle == UACPI_NULL;
5210 skip_if_null = op == UACPI_PARSE_OP_IF_NOT_NULL;
5211
5212 if (is_null == skip_if_null)
5213 op_ctx->pc += bytes_skip;
5214
5215 break;
5216 }
5217
5218 case UACPI_PARSE_OP_IF_EQUALS: {
5219 uacpi_u8 value, bytes_skip;
5220
5221 value = op_decode_byte(ctx: op_ctx);
5222 bytes_skip = op_decode_byte(ctx: op_ctx);
5223
5224 if (item->immediate != value)
5225 op_ctx->pc += bytes_skip;
5226
5227 break;
5228 }
5229
5230 case UACPI_PARSE_OP_JMP: {
5231 op_ctx->pc = op_decode_byte(ctx: op_ctx);
5232 break;
5233 }
5234
5235 case UACPI_PARSE_OP_CREATE_NAMESTRING:
5236 case UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD:
5237 case UACPI_PARSE_OP_EXISTING_NAMESTRING:
5238 case UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL:
5239 case UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL_IF_LOAD: {
5240 uacpi_size offset = frame->code_offset;
5241 enum resolve_behavior behavior;
5242
5243 if (op == UACPI_PARSE_OP_CREATE_NAMESTRING ||
5244 op == UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD)
5245 behavior = RESOLVE_CREATE_LAST_NAMESEG_FAIL_IF_EXISTS;
5246 else
5247 behavior = RESOLVE_FAIL_IF_DOESNT_EXIST;
5248
5249 ret = resolve_name_string(frame, behavior, out_node: &item->node);
5250
5251 if (ret == UACPI_STATUS_NOT_FOUND) {
5252 uacpi_bool is_ok;
5253
5254 if (prev_op) {
5255 is_ok = op_allows_unresolved(op: prev_op);
5256 is_ok &= op_allows_unresolved(op);
5257 } else {
5258 // This is the only standalone op where we allow unresolved
5259 is_ok = op_ctx->op->code == UACPI_AML_OP_ExternalOp;
5260 }
5261
5262 if (is_ok)
5263 ret = UACPI_STATUS_OK;
5264 }
5265
5266 if (uacpi_unlikely_error(ret)) {
5267 enum uacpi_log_level lvl = UACPI_LOG_ERROR;
5268 uacpi_status trace_ret = ret;
5269
5270 if (ctx->cur_frame->method->named_objects_persist) {
5271 uacpi_bool is_ok;
5272
5273 is_ok = op_allows_unresolved_if_load(op);
5274 is_ok &= ret == UACPI_STATUS_AML_OBJECT_ALREADY_EXISTS ||
5275 ret == UACPI_STATUS_NOT_FOUND;
5276
5277 if (is_ok) {
5278 lvl = UACPI_LOG_WARN;
5279 ret = UACPI_STATUS_OK;
5280 }
5281 }
5282
5283 trace_named_object_lookup_or_creation_failure(
5284 frame, offset, op, ret: trace_ret, level: lvl
5285 );
5286
5287 if (ret == UACPI_STATUS_NOT_FOUND)
5288 ret = UACPI_STATUS_AML_UNDEFINED_REFERENCE;
5289 }
5290
5291 break;
5292 }
5293
5294 case UACPI_PARSE_OP_INVOKE_HANDLER: {
5295 uacpi_aml_op code = op_ctx->op->code;
5296 uacpi_u8 idx;
5297
5298 if (code <= 0xFF)
5299 idx = handler_idx_of_op[code];
5300 else
5301 idx = handler_idx_of_ext_op[EXT_OP_IDX(code)];
5302
5303 ret = op_handlers[idx](ctx);
5304 break;
5305 }
5306
5307 case UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE:
5308 item = item_array_at(arr: &op_ctx->items, idx: op_decode_byte(ctx: op_ctx));
5309 ret = do_install_node_item(frame, item);
5310 break;
5311
5312 case UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV:
5313 case UACPI_PARSE_OP_OBJECT_COPY_TO_PREV: {
5314 uacpi_object *src;
5315 struct item *dst;
5316
5317 if (!ctx->prev_op_ctx)
5318 break;
5319
5320 switch (prev_op) {
5321 case UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL:
5322 case UACPI_PARSE_OP_COMPUTATIONAL_DATA:
5323 case UACPI_PARSE_OP_OPERAND:
5324 case UACPI_PARSE_OP_STRING:
5325 src = uacpi_unwrap_internal_reference(object: item->obj);
5326
5327 if (prev_op == UACPI_PARSE_OP_OPERAND)
5328 ret = typecheck_operand(op_ctx: ctx->prev_op_ctx, obj: src);
5329 else if (prev_op == UACPI_PARSE_OP_STRING)
5330 ret = typecheck_string(op_ctx: ctx->prev_op_ctx, obj: src);
5331 else if (prev_op == UACPI_PARSE_OP_COMPUTATIONAL_DATA)
5332 ret = typecheck_computational_data(op_ctx: ctx->prev_op_ctx, obj: src);
5333
5334 break;
5335 case UACPI_PARSE_OP_SUPERNAME:
5336 case UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED:
5337 src = item->obj;
5338 break;
5339
5340 case UACPI_PARSE_OP_SIMPLE_NAME:
5341 case UACPI_PARSE_OP_TERM_ARG:
5342 case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT:
5343 case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED:
5344 case UACPI_PARSE_OP_TARGET:
5345 src = item->obj;
5346 break;
5347
5348 default:
5349 EXEC_OP_ERR_1("don't know how to copy/transfer object to %d",
5350 prev_op);
5351 ret = UACPI_STATUS_INVALID_ARGUMENT;
5352 break;
5353 }
5354
5355 if (uacpi_likely_success(ret)) {
5356 dst = item_array_last(arr: &ctx->prev_op_ctx->items);
5357 dst->type = ITEM_OBJECT;
5358
5359 if (op == UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV) {
5360 dst->obj = src;
5361 uacpi_object_ref(obj: dst->obj);
5362 } else {
5363 dst->obj = uacpi_create_object(type: UACPI_OBJECT_UNINITIALIZED);
5364 if (uacpi_unlikely(dst->obj == UACPI_NULL)) {
5365 ret = UACPI_STATUS_OUT_OF_MEMORY;
5366 break;
5367 }
5368
5369 ret = uacpi_object_assign(dst: dst->obj, src,
5370 UACPI_ASSIGN_BEHAVIOR_DEEP_COPY);
5371 }
5372 }
5373 break;
5374 }
5375
5376 case UACPI_PARSE_OP_STORE_TO_TARGET:
5377 case UACPI_PARSE_OP_STORE_TO_TARGET_INDIRECT: {
5378 uacpi_object *dst, *src;
5379
5380 dst = item_array_at(arr: &op_ctx->items, idx: op_decode_byte(ctx: op_ctx))->obj;
5381
5382 if (op == UACPI_PARSE_OP_STORE_TO_TARGET_INDIRECT) {
5383 src = item_array_at(arr: &op_ctx->items,
5384 idx: op_decode_byte(ctx: op_ctx))->obj;
5385 } else {
5386 src = item->obj;
5387 }
5388
5389 ret = store_to_target(dst, src);
5390 break;
5391 }
5392
5393 // Nothing to do here, object is allocated automatically
5394 case UACPI_PARSE_OP_OBJECT_ALLOC:
5395 case UACPI_PARSE_OP_OBJECT_ALLOC_TYPED:
5396 case UACPI_PARSE_OP_EMPTY_OBJECT_ALLOC:
5397 break;
5398
5399 case UACPI_PARSE_OP_OBJECT_CONVERT_TO_SHALLOW_COPY:
5400 case UACPI_PARSE_OP_OBJECT_CONVERT_TO_DEEP_COPY: {
5401 uacpi_object *temp = item->obj;
5402 enum uacpi_assign_behavior behavior;
5403
5404 item_array_pop(arr: &op_ctx->items);
5405 item = item_array_last(arr: &op_ctx->items);
5406
5407 if (op == UACPI_PARSE_OP_OBJECT_CONVERT_TO_SHALLOW_COPY)
5408 behavior = UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY;
5409 else
5410 behavior = UACPI_ASSIGN_BEHAVIOR_DEEP_COPY;
5411
5412 ret = uacpi_object_assign(dst: temp, src: item->obj, behavior);
5413 if (uacpi_unlikely_error(ret))
5414 break;
5415
5416 uacpi_object_unref(obj: item->obj);
5417 item->obj = temp;
5418 break;
5419 }
5420
5421 case UACPI_PARSE_OP_DISPATCH_METHOD_CALL: {
5422 struct uacpi_namespace_node *node;
5423 struct uacpi_control_method *method;
5424
5425 node = item_array_at(arr: &op_ctx->items, idx: 0)->node;
5426 method = uacpi_namespace_node_get_object(node)->method;
5427
5428 ret = prepare_method_call(
5429 ctx, node, method, type: METHOD_CALL_AML, UACPI_NULL
5430 );
5431 return ret;
5432 }
5433
5434 case UACPI_PARSE_OP_DISPATCH_TABLE_LOAD: {
5435 struct uacpi_namespace_node *node;
5436 struct uacpi_control_method *method;
5437
5438 node = item_array_at(arr: &op_ctx->items, idx: 0)->node;
5439 method = item_array_at(arr: &op_ctx->items, idx: 1)->obj->method;
5440
5441 ret = prepare_method_call(
5442 ctx, node, method, type: METHOD_CALL_TABLE_LOAD, UACPI_NULL
5443 );
5444 return ret;
5445 }
5446
5447 case UACPI_PARSE_OP_CONVERT_NAMESTRING: {
5448 uacpi_aml_op new_op = UACPI_AML_OP_InternalOpNamedObject;
5449 uacpi_object *obj;
5450
5451 if (item->node == UACPI_NULL) {
5452 if (!op_allows_unresolved(op: prev_op))
5453 ret = UACPI_STATUS_NOT_FOUND;
5454 break;
5455 }
5456
5457 obj = uacpi_namespace_node_get_object(node: item->node);
5458
5459 switch (obj->type) {
5460 case UACPI_OBJECT_METHOD: {
5461 uacpi_bool should_invoke;
5462
5463 switch (prev_op) {
5464 case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT:
5465 case UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED:
5466 should_invoke = UACPI_FALSE;
5467 break;
5468 default:
5469 should_invoke = !op_wants_supername(op: prev_op);
5470 }
5471
5472 if (!should_invoke)
5473 break;
5474
5475 new_op = UACPI_AML_OP_InternalOpMethodCall0Args;
5476 new_op += obj->method->args;
5477 break;
5478 }
5479
5480 case UACPI_OBJECT_BUFFER_FIELD:
5481 case UACPI_OBJECT_FIELD_UNIT:
5482 if (!op_wants_term_arg_or_operand(op: prev_op))
5483 break;
5484
5485 switch (field_get_read_type(obj)) {
5486 case UACPI_OBJECT_BUFFER:
5487 new_op = UACPI_AML_OP_InternalOpReadFieldAsBuffer;
5488 break;
5489 case UACPI_OBJECT_INTEGER:
5490 new_op = UACPI_AML_OP_InternalOpReadFieldAsInteger;
5491 break;
5492 default:
5493 ret = UACPI_STATUS_INVALID_ARGUMENT;
5494 continue;
5495 }
5496 break;
5497 default:
5498 break;
5499 }
5500
5501 op_ctx->pc = 0;
5502 op_ctx->op = uacpi_get_op_spec(new_op);
5503 break;
5504 }
5505
5506 default:
5507 EXEC_OP_ERR_1("unhandled parser op '%d'", op);
5508 ret = UACPI_STATUS_UNIMPLEMENTED;
5509 break;
5510 }
5511 }
5512}
5513
5514static void ctx_reload_post_ret(struct execution_context *ctx)
5515{
5516 call_frame_clear(frame: ctx->cur_frame);
5517
5518 if (ctx->cur_frame->method->is_serialized) {
5519 held_mutexes_array_remove_and_release(
5520 arr: &ctx->held_mutexes,
5521 mutex: ctx->cur_frame->method->mutex,
5522 force: FORCE_RELEASE_YES
5523 );
5524 ctx->sync_level = ctx->cur_frame->prev_sync_level;
5525 }
5526
5527 call_frame_array_pop(arr: &ctx->call_stack);
5528
5529 ctx->cur_frame = call_frame_array_last(arr: &ctx->call_stack);
5530 refresh_ctx_pointers(ctx);
5531}
5532
5533static void trace_method_abort(struct code_block *block, uacpi_size depth)
5534{
5535 static const uacpi_char *unknown_path = "<unknown>";
5536 uacpi_char oom_absolute_path[9] = "<?>.";
5537
5538 const uacpi_char *absolute_path;
5539
5540 if (block != UACPI_NULL && block->type == CODE_BLOCK_SCOPE) {
5541 absolute_path = uacpi_namespace_node_generate_absolute_path(node: block->node);
5542 if (uacpi_unlikely(absolute_path == UACPI_NULL))
5543 uacpi_memcpy(dest: oom_absolute_path + 4, src: block->node->name.text, count: 4);
5544 } else {
5545 absolute_path = unknown_path;
5546 }
5547
5548 uacpi_error(" #%zu in %s()\n", depth, absolute_path);
5549
5550 if (absolute_path != oom_absolute_path && absolute_path != unknown_path)
5551 uacpi_free_dynamic_string(str: absolute_path);
5552}
5553
5554static void stack_unwind(struct execution_context *ctx)
5555{
5556 uacpi_size depth;
5557 uacpi_bool should_stop;
5558
5559 /*
5560 * Non-empty call stack here means the execution was aborted at some point,
5561 * probably due to a bytecode error.
5562 */
5563 depth = call_frame_array_size(arr: &ctx->call_stack);
5564
5565 if (depth != 0) {
5566 uacpi_size idx = 0;
5567 uacpi_bool table_level_code;
5568
5569 do {
5570 table_level_code = ctx->cur_frame->method->named_objects_persist;
5571
5572 if (table_level_code && idx != 0)
5573 /*
5574 * This isn't the first frame that we are aborting.
5575 * If this is table-level code, we have just unwound a call
5576 * chain that had triggered an abort. Stop here, no need to
5577 * abort table load because of it.
5578 */
5579 break;
5580
5581 while (op_context_array_size(arr: &ctx->cur_frame->pending_ops) != 0)
5582 pop_op(ctx);
5583
5584 trace_method_abort(
5585 block: code_block_array_at(arr: &ctx->cur_frame->code_blocks, idx: 0), depth: idx
5586 );
5587
5588 should_stop = idx++ == 0 && table_level_code;
5589 ctx_reload_post_ret(ctx);
5590 } while (--depth && !should_stop);
5591 }
5592}
5593
5594static void execution_context_release(struct execution_context *ctx)
5595{
5596 if (ctx->ret)
5597 uacpi_object_unref(obj: ctx->ret);
5598
5599 while (held_mutexes_array_size(arr: &ctx->held_mutexes) != 0) {
5600 held_mutexes_array_remove_and_release(
5601 arr: &ctx->held_mutexes,
5602 mutex: *held_mutexes_array_last(arr: &ctx->held_mutexes),
5603 force: FORCE_RELEASE_YES
5604 );
5605 }
5606
5607 call_frame_array_clear(arr: &ctx->call_stack);
5608 held_mutexes_array_clear(arr: &ctx->held_mutexes);
5609 uacpi_free(ctx, sizeof(*ctx));
5610}
5611
5612uacpi_status uacpi_execute_control_method(
5613 uacpi_namespace_node *scope, uacpi_control_method *method,
5614 const uacpi_args *args, uacpi_object **out_obj
5615)
5616{
5617 uacpi_status ret = UACPI_STATUS_OK;
5618 struct execution_context *ctx;
5619
5620 ctx = uacpi_kernel_calloc(count: 1, size: sizeof(*ctx));
5621 if (uacpi_unlikely(ctx == UACPI_NULL))
5622 return UACPI_STATUS_OUT_OF_MEMORY;
5623
5624 if (out_obj != UACPI_NULL) {
5625 ctx->ret = uacpi_create_object(type: UACPI_OBJECT_UNINITIALIZED);
5626 if (uacpi_unlikely(ctx->ret == UACPI_NULL)) {
5627 ret = UACPI_STATUS_OUT_OF_MEMORY;
5628 goto out;
5629 }
5630 }
5631
5632 ret = prepare_method_call(ctx, node: scope, method, type: METHOD_CALL_NATIVE, args);
5633 if (uacpi_unlikely_error(ret))
5634 goto out;
5635
5636 for (;;) {
5637 if (!ctx_has_non_preempted_op(ctx)) {
5638 if (ctx->cur_frame == UACPI_NULL)
5639 break;
5640
5641 if (maybe_end_block(ctx))
5642 continue;
5643
5644 if (!call_frame_has_code(frame: ctx->cur_frame)) {
5645 ctx_reload_post_ret(ctx);
5646 continue;
5647 }
5648
5649 ret = get_op(ctx);
5650 if (uacpi_unlikely_error(ret))
5651 goto handle_method_abort;
5652
5653 trace_op(op: ctx->cur_op, action: OP_TRACE_ACTION_BEGIN);
5654 }
5655
5656 ret = exec_op(ctx);
5657 if (uacpi_unlikely_error(ret))
5658 goto handle_method_abort;
5659
5660 ctx->skip_else = UACPI_FALSE;
5661 continue;
5662
5663 handle_method_abort:
5664 uacpi_error("aborting %s due to previous error: %s\n",
5665 ctx->cur_frame->method->named_objects_persist ?
5666 "table load" : "method invocation",
5667 uacpi_status_to_string(ret));
5668 stack_unwind(ctx);
5669
5670 /*
5671 * Having a frame here implies that we just aborted a dynamic table
5672 * load. Signal to the caller that it failed by setting the return
5673 * value to false.
5674 */
5675 if (ctx->cur_frame) {
5676 struct item *it;
5677
5678 it = item_array_last(arr: &ctx->cur_op_ctx->items);
5679 if (it != UACPI_NULL && it->obj != UACPI_NULL)
5680 it->obj->integer = 0;
5681 }
5682 }
5683
5684out:
5685 if (ctx->ret != UACPI_NULL) {
5686 uacpi_object *ret_obj = UACPI_NULL;
5687
5688 if (ctx->ret->type != UACPI_OBJECT_UNINITIALIZED) {
5689 ret_obj = ctx->ret;
5690 uacpi_object_ref(obj: ret_obj);
5691 }
5692
5693 *out_obj = ret_obj;
5694 }
5695
5696 execution_context_release(ctx);
5697 return ret;
5698}
5699
5700uacpi_status uacpi_osi(uacpi_handle handle, uacpi_object *retval)
5701{
5702 struct execution_context *ctx = handle;
5703 uacpi_bool is_supported;
5704 uacpi_status ret;
5705 uacpi_object *arg;
5706
5707 arg = uacpi_unwrap_internal_reference(object: ctx->cur_frame->args[0]);
5708 if (arg->type != UACPI_OBJECT_STRING) {
5709 uacpi_error("_OSI: invalid argument type %s, expected a String\n",
5710 uacpi_object_type_to_string(arg->type));
5711 return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE;
5712 }
5713
5714 if (retval == UACPI_NULL)
5715 return UACPI_STATUS_OK;
5716
5717 retval->type = UACPI_OBJECT_INTEGER;
5718
5719 ret = uacpi_handle_osi(string: arg->buffer->text, out_value: &is_supported);
5720 if (uacpi_unlikely_error(ret))
5721 return ret;
5722
5723 retval->integer = is_supported ? ones() : 0;
5724
5725 uacpi_trace("_OSI(%s) => reporting as %ssupported\n",
5726 arg->buffer->text, is_supported ? "" : "un");
5727 return UACPI_STATUS_OK;
5728}
5729