#include <uacpi/internal/io.h>
#include <uacpi/internal/stdlib.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/opregion.h>
#include <uacpi/internal/utilities.h>
#include <uacpi/internal/mutex.h>

uacpi_size uacpi_round_up_bits_to_bytes(uacpi_size bit_length)
{
    return UACPI_ALIGN_UP(bit_length, 8, uacpi_size) / 8;
}

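/*
 * Mask off any bits in the last byte that lie past the end of a field whose
 * bit length is not a multiple of 8.
 */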
static void cut_misaligned_tail(
    uacpi_u8 *data, uacpi_size offset, uacpi_u32 bit_length
)
{
    uacpi_u8 remainder = bit_length & 7;

    if (remainder == 0)
        return;

    data[offset] &= ((1ull << remainder) - 1);
}

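/*
 * A bit-granular cursor over a byte buffer: 'index' is the current absolute
 * bit position within 'data', 'length' is the number of bits in the span.
 */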
struct bit_span
{
    union {
        uacpi_u8 *data;
        const uacpi_u8 *const_data;
    };
    uacpi_u64 index;
    uacpi_u64 length;
};

static uacpi_size bit_span_offset(struct bit_span *span, uacpi_size bits)
{
    uacpi_size delta = UACPI_MIN(span->length, bits);

    span->index += delta;
    span->length -= delta;

    return delta;
}

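/*
 * Copy dst->length bits from src into dst, handling arbitrary (and differing)
 * bit alignment on either side. Bits past the end of the source span read as
 * zero, and destination bits outside of the span are preserved. A destination
 * span that straddles a byte boundary (e.g. 4 bits at bit index 6) spills
 * into the next byte, which is what the dst_mask >>= 8 branch handles.
 */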
static void bit_copy(struct bit_span *dst, struct bit_span *src)
{
    uacpi_u8 src_shift, dst_shift, bits = 0;
    uacpi_u16 dst_mask;
    uacpi_u8 *dst_ptr, *src_ptr;
    uacpi_u64 dst_count, src_count;

    dst_ptr = dst->data + (dst->index / 8);
    src_ptr = src->data + (src->index / 8);

    dst_count = dst->length;
    dst_shift = dst->index & 7;

    src_count = src->length;
    src_shift = src->index & 7;

    while (dst_count)
    {
        bits = 0;

        if (src_count) {
            bits = *src_ptr >> src_shift;

            if (src_shift && src_count > (uacpi_u32)(8 - src_shift))
                bits |= *(src_ptr + 1) << (8 - src_shift);

            if (src_count < 8) {
                bits &= (1 << src_count) - 1;
                src_count = 0;
            } else {
                src_count -= 8;
                src_ptr++;
            }
        }

        dst_mask = (dst_count < 8 ? (1 << dst_count) - 1 : 0xFF) << dst_shift;
        *dst_ptr = (*dst_ptr & ~dst_mask) | ((bits << dst_shift) & dst_mask);

        if (dst_shift && dst_count > (uacpi_u32)(8 - dst_shift)) {
            dst_mask >>= 8;
            *(dst_ptr + 1) &= ~dst_mask;
            *(dst_ptr + 1) |= (bits >> (8 - dst_shift)) & dst_mask;
        }

        dst_count = dst_count > 8 ? dst_count - 8 : 0;
        ++dst_ptr;
    }
}

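/*
 * Read a buffer field whose bit index is not byte-aligned by bit-copying it
 * into the byte-aligned destination. The destination span is rounded up to
 * whole bytes, so the tail of the last byte gets zero-filled by bit_copy.
 */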
static void do_misaligned_buffer_read(
    const uacpi_buffer_field *field, uacpi_u8 *dst
)
{
    struct bit_span src_span = {
        .index = field->bit_index,
        .length = field->bit_length,
        .const_data = field->backing->data,
    };
    struct bit_span dst_span = {
        .data = dst,
    };

    dst_span.length = uacpi_round_up_bits_to_bytes(field->bit_length) * 8;
    bit_copy(&dst_span, &src_span);
}

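/*
 * Byte-aligned buffer fields take the memcpy fast path below, with the bits
 * past the field's tail masked off; everything else goes through bit_copy.
 */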
void uacpi_read_buffer_field(
    const uacpi_buffer_field *field, void *dst
)
{
    if (!(field->bit_index & 7)) {
        uacpi_u8 *src = field->backing->data;
        uacpi_size count;

        count = uacpi_round_up_bits_to_bytes(field->bit_length);
        uacpi_memcpy(dst, src + (field->bit_index / 8), count);
        cut_misaligned_tail(dst, count - 1, field->bit_length);
        return;
    }

    do_misaligned_buffer_read(field, dst);
}

static void do_write_misaligned_buffer_field(
    uacpi_buffer_field *field,
    const void *src, uacpi_size size
)
{
    struct bit_span src_span = {
        .length = size * 8,
        .const_data = src,
    };
    struct bit_span dst_span = {
        .index = field->bit_index,
        .length = field->bit_length,
        .data = field->backing->data,
    };

    bit_copy(&dst_span, &src_span);
}

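/*
 * For a byte-aligned field we can write via memcpy (zero-extending a short
 * source), but the bits of the last byte that lie past the field's tail must
 * be spliced back in afterwards: a buffer field write must not clobber data
 * outside of the field.
 */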
void uacpi_write_buffer_field(
    uacpi_buffer_field *field,
    const void *src, uacpi_size size
)
{
    if (!(field->bit_index & 7)) {
        uacpi_u8 *dst, last_byte, tail_shift;
        uacpi_size count;

        dst = field->backing->data;
        dst += field->bit_index / 8;
        count = uacpi_round_up_bits_to_bytes(field->bit_length);

        last_byte = dst[count - 1];
        tail_shift = field->bit_length & 7;

        uacpi_memcpy_zerout(dst, src, count, size);
        if (tail_shift) {
            uacpi_u8 last_shift = 8 - tail_shift;
            dst[count - 1] = dst[count - 1] << last_shift;
            dst[count - 1] >>= last_shift;
            dst[count - 1] |= (last_byte >> tail_shift) << tail_shift;
        }

        return;
    }

    do_write_misaligned_buffer_field(field, src, size);
}

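/*
 * Perform one access-sized read or write through an operation region: attach
 * the region if it isn't already, bounds-check the access against the region
 * window (including overflow of the translated offset), then hand it off to
 * the registered address space handler.
 */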
static uacpi_status dispatch_field_io(
    uacpi_namespace_node *region_node, uacpi_u32 offset, uacpi_u8 byte_width,
    uacpi_region_op op, uacpi_u64 *in_out
)
{
    uacpi_status ret;
    uacpi_operation_region *region;
    uacpi_address_space_handler *handler;
    uacpi_u64 offset_end;

    uacpi_region_rw_data data = {
        .byte_width = byte_width,
        .offset = offset,
    };

    ret = uacpi_opregion_attach(region_node);
    if (uacpi_unlikely_error(ret)) {
        uacpi_trace_region_error(
            region_node, "unable to attach", ret
        );
        return ret;
    }

    region = uacpi_namespace_node_get_object(region_node)->op_region;
    handler = region->handler;

    offset_end = offset;
    offset_end += byte_width;
    data.offset += region->offset;

    if (uacpi_unlikely(region->length < offset_end ||
                       data.offset < offset)) {
        const uacpi_char *path;

        path = uacpi_namespace_node_generate_absolute_path(region_node);
        uacpi_error(
            "out-of-bounds access to opregion %s[0x%"UACPI_PRIX64"->"
            "0x%"UACPI_PRIX64"] at 0x%"UACPI_PRIX64" (idx=%u, width=%d)\n",
            path, UACPI_FMT64(region->offset),
            UACPI_FMT64(region->offset + region->length),
            UACPI_FMT64(data.offset), offset, byte_width
        );
        uacpi_free_dynamic_string(path);
        return UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX;
    }

    data.handler_context = handler->user_context;
    data.region_context = region->user_context;

    if (op == UACPI_REGION_OP_WRITE) {
        data.value = *in_out;
        uacpi_trace_region_io(region_node, op, data.offset,
                              byte_width, data.value);
    }

    ret = handler->callback(op, &data);
    if (uacpi_unlikely_error(ret))
        return ret;

    if (op == UACPI_REGION_OP_READ) {
        *in_out = data.value;
        uacpi_trace_region_io(region_node, op, data.offset,
                              byte_width, data.value);
    }

    return UACPI_STATUS_OK;
}

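/*
 * Perform one aligned, access-width-sized read or write of a field unit,
 * taking the global lock first if the field's lock rule demands it. Banked
 * fields get their bank selection written out beforehand, and index fields
 * are accessed indirectly via their index/data pair instead of a region.
 */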
static uacpi_status access_field_unit(
    uacpi_field_unit *field, uacpi_u32 offset, uacpi_region_op op,
    uacpi_u64 *in_out
)
{
    uacpi_status ret = UACPI_STATUS_OK;
    uacpi_namespace_node *region_node;
    uacpi_mutex *gl = UACPI_NULL;

    if (field->lock_rule) {
        uacpi_namespace_node *gl_node;
        uacpi_object *obj;

        gl_node = uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_GL);
        obj = uacpi_namespace_node_get_object(gl_node);

        if (uacpi_likely(obj != UACPI_NULL && obj->type == UACPI_OBJECT_MUTEX))
            gl = obj->mutex;

        if (uacpi_unlikely(!uacpi_acquire_aml_mutex(gl, 0xFFFF)))
            return UACPI_STATUS_INTERNAL_ERROR;
    }

    switch (field->kind) {
    case UACPI_FIELD_UNIT_KIND_BANK:
        ret = uacpi_write_field_unit(
            field->bank_selection, &field->bank_value, sizeof(field->bank_value)
        );
        region_node = field->bank_region;
        break;
    case UACPI_FIELD_UNIT_KIND_NORMAL:
        region_node = field->region;
        break;
    case UACPI_FIELD_UNIT_KIND_INDEX:
        ret = uacpi_write_field_unit(
            field->index, &offset, sizeof(offset)
        );
        if (uacpi_unlikely_error(ret))
            goto out;

        // Go through "out" so that the global lock (if held) is released
        switch (op) {
        case UACPI_REGION_OP_READ:
            ret = uacpi_read_field_unit(
                field->data, in_out, field->access_width_bytes
            );
            goto out;
        case UACPI_REGION_OP_WRITE:
            ret = uacpi_write_field_unit(
                field->data, in_out, field->access_width_bytes
            );
            goto out;
        default:
            ret = UACPI_STATUS_INVALID_ARGUMENT;
            goto out;
        }

    default:
        uacpi_error("invalid field unit kind %d\n", field->kind);
        ret = UACPI_STATUS_INVALID_ARGUMENT;
    }

    if (uacpi_unlikely_error(ret))
        goto out;

    ret = dispatch_field_io(
        region_node, offset, field->access_width_bytes, op, in_out
    );

out:
    if (gl != UACPI_NULL)
        uacpi_release_aml_mutex(gl);
    return ret;
}

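/*
 * Read a field unit that doesn't fit into a single aligned access: do as many
 * access-width-sized reads as it takes to cover the field and bit-copy each
 * chunk into its place in the destination buffer.
 */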
static uacpi_status do_read_misaligned_field_unit(
    uacpi_field_unit *field, uacpi_u8 *dst, uacpi_size size
)
{
    uacpi_status ret;
    uacpi_size reads_to_do;
    uacpi_u64 out;
    uacpi_u32 byte_offset = field->byte_offset;
    uacpi_u32 bits_left = field->bit_length;
    uacpi_u8 width_access_bits = field->access_width_bytes * 8;

    struct bit_span src_span = {
        .data = (uacpi_u8*)&out,
        .index = field->bit_offset_within_first_byte,
    };
    struct bit_span dst_span = {
        .data = dst,
        .index = 0,
        .length = size * 8
    };

    reads_to_do = UACPI_ALIGN_UP(
        field->bit_offset_within_first_byte + field->bit_length,
        width_access_bits,
        uacpi_u32
    );
    reads_to_do /= width_access_bits;

    while (reads_to_do-- > 0) {
        src_span.length = UACPI_MIN(
            bits_left, width_access_bits - src_span.index
        );

        ret = access_field_unit(
            field, byte_offset, UACPI_REGION_OP_READ,
            &out
        );
        if (uacpi_unlikely_error(ret))
            return ret;

        bit_copy(&dst_span, &src_span);
        bits_left -= src_span.length;
        src_span.index = 0;

        bit_span_offset(&dst_span, src_span.length);
        byte_offset += field->access_width_bytes;
    }

    return UACPI_STATUS_OK;
}

uacpi_status uacpi_read_field_unit(
    uacpi_field_unit *field, void *dst, uacpi_size size
)
{
    uacpi_status ret;
    uacpi_u32 field_byte_length;

    field_byte_length = uacpi_round_up_bits_to_bytes(field->bit_length);

    /*
     * Very simple fast case:
     * - Bit offset within first byte is 0
     * AND
     * - Field size is <= access width
     */
    if (field->bit_offset_within_first_byte == 0 &&
        field_byte_length <= field->access_width_bytes)
    {
        uacpi_u64 out;

        ret = access_field_unit(
            field, field->byte_offset, UACPI_REGION_OP_READ, &out
        );
        if (uacpi_unlikely_error(ret))
            return ret;

        uacpi_memcpy_zerout(dst, &out, size, field_byte_length);
        if (size >= field_byte_length)
            cut_misaligned_tail(dst, field_byte_length - 1, field->bit_length);

        return UACPI_STATUS_OK;
    }

    // Slow case
    return do_read_misaligned_field_unit(field, dst, size);
}

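/*
 * Write a field unit one access-width-sized chunk at a time. For chunks that
 * don't cover the full access width, the field's update rule dictates what
 * happens to the untouched bits: read back and preserve them, force them all
 * to ones, or leave them as zeroes.
 */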
uacpi_status uacpi_write_field_unit(
    uacpi_field_unit *field, const void *src, uacpi_size size
)
{
    uacpi_status ret;
    uacpi_u32 bits_left, byte_offset = field->byte_offset;
    uacpi_u8 width_access_bits = field->access_width_bytes * 8;
    uacpi_u64 in;

    struct bit_span src_span = {
        .const_data = src,
        .index = 0,
        .length = size * 8
    };
    struct bit_span dst_span = {
        .data = (uacpi_u8*)&in,
        .index = field->bit_offset_within_first_byte,
    };

    bits_left = field->bit_length;

    while (bits_left) {
        in = 0;
        dst_span.length = UACPI_MIN(
            width_access_bits - dst_span.index, bits_left
        );

        if (dst_span.index != 0 || dst_span.length < width_access_bits) {
            switch (field->update_rule) {
            case UACPI_UPDATE_RULE_PRESERVE:
                ret = access_field_unit(
                    field, byte_offset, UACPI_REGION_OP_READ, &in
                );
                if (uacpi_unlikely_error(ret))
                    return ret;
                break;
            case UACPI_UPDATE_RULE_WRITE_AS_ONES:
                in = ~in;
                break;
            case UACPI_UPDATE_RULE_WRITE_AS_ZEROES:
                break;
            default:
                uacpi_error("invalid field@%p update rule %d\n",
                            field, field->update_rule);
                return UACPI_STATUS_INVALID_ARGUMENT;
            }
        }

        bit_copy(&dst_span, &src_span);
        bit_span_offset(&src_span, dst_span.length);

        ret = access_field_unit(
            field, byte_offset, UACPI_REGION_OP_WRITE, &in
        );
        if (uacpi_unlikely_error(ret))
            return ret;

        bits_left -= dst_span.length;
        dst_span.index = 0;
        byte_offset += field->access_width_bytes;
    }

    return UACPI_STATUS_OK;
}

static uacpi_u8 gas_get_access_bit_width(const struct acpi_gas *gas)
{
    /*
     * Same algorithm as ACPICA.
     *
     * Apparently a non-zero GAS bit offset means it's an APEI register, as
     * opposed to a FADT register, and those need special handling. In the
     * case of a FADT register we want to ignore the specified access size.
     */
    uacpi_u8 access_bit_width;

    if (gas->register_bit_offset == 0 &&
        UACPI_IS_POWER_OF_TWO(gas->register_bit_width, uacpi_u8) &&
        UACPI_IS_ALIGNED(gas->register_bit_width, 8, uacpi_u8)) {
        access_bit_width = gas->register_bit_width;
    } else if (gas->access_size) {
        access_bit_width = gas->access_size * 8;
    } else {
        uacpi_u8 msb;

        msb = uacpi_bit_scan_backward(
            (gas->register_bit_offset + gas->register_bit_width) - 1
        );
        access_bit_width = 1 << msb;

        if (access_bit_width <= 8) {
            access_bit_width = 8;
        } else {
            /*
             * Keep backing off to the previous power of two until we find one
             * that is aligned to the address specified in the GAS.
             */
            while (!UACPI_IS_ALIGNED(
                gas->address, access_bit_width / 8, uacpi_u64
            ))
                access_bit_width /= 2;
        }
    }

    return UACPI_MIN(
        access_bit_width,
        gas->address_space_id == UACPI_ADDRESS_SPACE_SYSTEM_IO ? 32 : 64
    );
}

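/*
 * Sanity-check a GAS and compute the access width to use for it. Only the
 * system I/O and system memory address spaces are handled here, and the
 * register (rounded up to the access width) must fit into 64 bits.
 */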
static uacpi_status gas_validate(
    const struct acpi_gas *gas, uacpi_u8 *access_bit_width
)
{
    uacpi_size total_width;

    if (uacpi_unlikely(gas == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;

    if (!gas->address)
        return UACPI_STATUS_NOT_FOUND;

    if (gas->address_space_id != UACPI_ADDRESS_SPACE_SYSTEM_IO &&
        gas->address_space_id != UACPI_ADDRESS_SPACE_SYSTEM_MEMORY) {
        uacpi_warn("unsupported GAS address space '%s' (%d)\n",
                   uacpi_address_space_to_string(gas->address_space_id),
                   gas->address_space_id);
        return UACPI_STATUS_UNIMPLEMENTED;
    }

    if (gas->access_size > 4) {
        uacpi_warn("unsupported GAS access size %d\n",
                   gas->access_size);
        return UACPI_STATUS_UNIMPLEMENTED;
    }

    *access_bit_width = gas_get_access_bit_width(gas);

    total_width = UACPI_ALIGN_UP(
        gas->register_bit_offset + gas->register_bit_width,
        *access_bit_width, uacpi_size
    );
    if (total_width > 64) {
        uacpi_warn(
            "GAS register total width is too large: %zu\n", total_width
        );
        return UACPI_STATUS_UNIMPLEMENTED;
    }

    return UACPI_STATUS_OK;
}

557/*
558 * Apparently both reading and writing GAS works differently from operation
559 * region in that bit offsets are not respected when writing the data.
560 *
561 * Let's follow ACPICA's approach here so that we don't accidentally
562 * break any quirky hardware.
563 */
564
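/*
 * Example usage (illustrative only; the FADT pointer and the
 * x_pm1a_evt_blk field name are assumed, not defined in this file):
 *
 *     uacpi_u64 value;
 *     uacpi_status ret = uacpi_gas_read(&fadt->x_pm1a_evt_blk, &value);
 *     if (ret == UACPI_STATUS_OK) {
 *         // only the low register_bit_width bits of 'value' are meaningful
 *     }
 */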
uacpi_status uacpi_gas_read(const struct acpi_gas *gas, uacpi_u64 *out_value)
{
    uacpi_status ret;
    uacpi_u8 access_bit_width, access_byte_width;
    uacpi_u8 bit_offset, bits_left, index = 0;
    uacpi_u64 data, mask = 0xFFFFFFFFFFFFFFFF;

    ret = gas_validate(gas, &access_bit_width);
    if (ret != UACPI_STATUS_OK)
        return ret;

    bit_offset = gas->register_bit_offset;
    bits_left = bit_offset + gas->register_bit_width;

    access_byte_width = access_bit_width / 8;

    if (access_byte_width < 8)
        mask = ~(mask << access_bit_width);

    *out_value = 0;

    while (bits_left) {
        if (bit_offset >= access_bit_width) {
            data = 0;
            bit_offset -= access_bit_width;
        } else {
            uacpi_u64 address = gas->address + (index * access_byte_width);

            if (gas->address_space_id == UACPI_ADDRESS_SPACE_SYSTEM_IO) {
                ret = uacpi_kernel_raw_io_read(
                    address, access_byte_width, &data
                );
            } else {
                ret = uacpi_kernel_raw_memory_read(
                    address, access_byte_width, &data
                );
            }
            if (uacpi_unlikely_error(ret))
                return ret;
        }

        *out_value |= (data & mask) << (index * access_bit_width);
        bits_left -= UACPI_MIN(bits_left, access_bit_width);
        ++index;
    }

    return UACPI_STATUS_OK;
}

uacpi_status uacpi_gas_write(const struct acpi_gas *gas, uacpi_u64 in_value)
{
    uacpi_status ret;
    uacpi_u8 access_bit_width, access_byte_width;
    uacpi_u8 bit_offset, bits_left, index = 0;
    uacpi_u64 data, mask = 0xFFFFFFFFFFFFFFFF;

    ret = gas_validate(gas, &access_bit_width);
    if (ret != UACPI_STATUS_OK)
        return ret;

    bit_offset = gas->register_bit_offset;
    bits_left = bit_offset + gas->register_bit_width;
    access_byte_width = access_bit_width / 8;

    if (access_byte_width < 8)
        mask = ~(mask << access_bit_width);

    while (bits_left) {
        data = (in_value >> (index * access_bit_width)) & mask;

        if (bit_offset >= access_bit_width) {
            bit_offset -= access_bit_width;
        } else {
            uacpi_u64 address = gas->address + (index * access_byte_width);

            if (gas->address_space_id == UACPI_ADDRESS_SPACE_SYSTEM_IO) {
                ret = uacpi_kernel_raw_io_write(
                    address, access_byte_width, data
                );
            } else {
                ret = uacpi_kernel_raw_memory_write(
                    address, access_byte_width, data
                );
            }
            if (uacpi_unlikely_error(ret))
                return ret;
        }

        bits_left -= UACPI_MIN(bits_left, access_bit_width);
        ++index;
    }

    return UACPI_STATUS_OK;
}