#include <uacpi/platform/atomic.h>
#include <uacpi/internal/mutex.h>
#include <uacpi/internal/log.h>
#include <uacpi/internal/registers.h>
#include <uacpi/internal/context.h>
#include <uacpi/kernel_api.h>

#if UACPI_REDUCED_HARDWARE == 0

#define GLOBAL_LOCK_PENDING (1 << 0)

#define GLOBAL_LOCK_OWNED_BIT 1
#define GLOBAL_LOCK_OWNED (1 << GLOBAL_LOCK_OWNED_BIT)

#define GLOBAL_LOCK_MASK 0b11u

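/*
 * Attempt to take the FACS global lock using the protocol defined by the ACPI
 * specification: atomically set the owned bit, and, if the lock is already
 * owned by firmware, set the pending bit instead so that we get notified once
 * firmware releases it.
 *
 * Returns UACPI_TRUE if the lock was acquired, UACPI_FALSE if it is currently
 * owned and we have registered ourselves as a pending waiter.
 */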
static uacpi_bool try_acquire_global_lock_from_firmware(uacpi_u32 *lock)
{
    uacpi_u32 value, new_value;
    uacpi_bool was_owned;

    value = *(volatile uacpi_u32*)lock;
    do {
        was_owned = (value & GLOBAL_LOCK_OWNED) >> GLOBAL_LOCK_OWNED_BIT;

        // Clear both owned & pending bits.
        new_value = value & ~GLOBAL_LOCK_MASK;

        // Set owned unconditionally
        new_value |= GLOBAL_LOCK_OWNED;

        // Set pending iff the lock was owned at the time of reading
        if (was_owned)
            new_value |= GLOBAL_LOCK_PENDING;
    } while (!uacpi_atomic_cmpxchg32(lock, &value, new_value));

    return !was_owned;
}

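/*
 * Clear the owned & pending bits in the FACS global lock. Returns UACPI_TRUE
 * if the pending bit was set at the time of release, in which case the caller
 * must notify firmware by writing to the GBL_RLS register field.
 */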
static uacpi_bool do_release_global_lock_to_firmware(uacpi_u32 *lock)
{
    uacpi_u32 value, new_value;

    value = *(volatile uacpi_u32*)lock;
    do {
        new_value = value & ~GLOBAL_LOCK_MASK;
    } while (!uacpi_atomic_cmpxchg32(lock, &value, new_value));

    return value & GLOBAL_LOCK_PENDING;
}

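/*
 * Acquire the firmware half of the global lock. If firmware currently owns
 * it, mark ourselves as pending and wait for the release notification event,
 * retrying up to 0xFFFF times before reporting a hardware timeout.
 */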
static uacpi_status uacpi_acquire_global_lock_from_firmware(void)
{
    uacpi_cpu_flags flags;
    uacpi_u16 spins = 0;
    uacpi_bool success;

    if (!g_uacpi_rt_ctx.has_global_lock)
        return UACPI_STATUS_OK;

    flags = uacpi_kernel_lock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock);
    for (;;) {
        spins++;
        uacpi_trace(
            "trying to acquire the global lock from firmware... (attempt %u)\n",
            spins
        );

        success = try_acquire_global_lock_from_firmware(
            &g_uacpi_rt_ctx.facs->global_lock
        );
        if (success)
            break;

        if (uacpi_unlikely(spins == 0xFFFF))
            break;

        g_uacpi_rt_ctx.global_lock_pending = UACPI_TRUE;
        uacpi_trace(
            "global lock is owned by firmware, waiting for a release "
            "notification...\n"
        );
        uacpi_kernel_unlock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock, flags);

        uacpi_kernel_wait_for_event(g_uacpi_rt_ctx.global_lock_event, 0xFFFF);
        flags = uacpi_kernel_lock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock);
    }

    g_uacpi_rt_ctx.global_lock_pending = UACPI_FALSE;
    uacpi_kernel_unlock_spinlock(g_uacpi_rt_ctx.global_lock_spinlock, flags);

    if (uacpi_unlikely(!success)) {
        uacpi_error("unable to acquire global lock after %u attempts\n", spins);
        return UACPI_STATUS_HARDWARE_TIMEOUT;
    }

    uacpi_trace("global lock successfully acquired after %u attempt%s\n",
                spins, spins > 1 ? "s" : "");
    return UACPI_STATUS_OK;
}

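/*
 * Hand the global lock back to firmware, writing GBL_RLS to notify it in case
 * another owner was pending on the lock.
 */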
static void uacpi_release_global_lock_to_firmware(void)
{
    if (!g_uacpi_rt_ctx.has_global_lock)
        return;

    uacpi_trace("releasing the global lock to firmware...\n");
    if (do_release_global_lock_to_firmware(&g_uacpi_rt_ctx.facs->global_lock)) {
        uacpi_trace("notifying firmware of the global lock release since the "
                    "pending bit was set\n");
        uacpi_write_register_field(UACPI_REGISTER_FIELD_GBL_RLS, 1);
    }
}
#endif

UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE(
    uacpi_status uacpi_acquire_global_lock_from_firmware(void)
)
UACPI_STUB_IF_REDUCED_HARDWARE(
    void uacpi_release_global_lock_to_firmware(void)
)

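/*
 * Public API for acquiring the ACPI global lock: takes the OS-side mutex
 * first (with the caller-provided timeout), then the firmware half of the
 * lock. On success, a sequence number is handed out via out_seq and must be
 * passed back to uacpi_release_global_lock().
 *
 * Illustrative (hypothetical) caller:
 *     uacpi_u32 seq;
 *     if (uacpi_acquire_global_lock(0xFFFF, &seq) == UACPI_STATUS_OK) {
 *         // ... access hardware shared with firmware ...
 *         uacpi_release_global_lock(seq);
 *     }
 */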
uacpi_status uacpi_acquire_global_lock(uacpi_u16 timeout, uacpi_u32 *out_seq)
{
    uacpi_bool did_acquire;
    uacpi_status ret;

    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_TABLES_LOADED);

    if (uacpi_unlikely(out_seq == UACPI_NULL))
        return UACPI_STATUS_INVALID_ARGUMENT;

    UACPI_MUTEX_ACQUIRE_WITH_TIMEOUT(
        g_uacpi_rt_ctx.global_lock_mutex, timeout, did_acquire
    );
    if (!did_acquire)
        return UACPI_STATUS_TIMEOUT;

    ret = uacpi_acquire_global_lock_from_firmware();
    if (uacpi_unlikely_error(ret)) {
        UACPI_MUTEX_RELEASE(g_uacpi_rt_ctx.global_lock_mutex);
        return ret;
    }

    if (uacpi_unlikely(g_uacpi_rt_ctx.global_lock_seq_num == 0xFFFFFFFF))
        g_uacpi_rt_ctx.global_lock_seq_num = 0;

    *out_seq = g_uacpi_rt_ctx.global_lock_seq_num++;
    g_uacpi_rt_ctx.global_lock_acquired = UACPI_TRUE;
    return UACPI_STATUS_OK;
}

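/*
 * Release the global lock previously taken via uacpi_acquire_global_lock().
 * The sequence number is validated to catch mismatched or double releases.
 */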
uacpi_status uacpi_release_global_lock(uacpi_u32 seq)
{
    UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_TABLES_LOADED);

    if (uacpi_unlikely(!g_uacpi_rt_ctx.global_lock_acquired ||
                       seq != g_uacpi_rt_ctx.global_lock_seq_num))
        return UACPI_STATUS_INVALID_ARGUMENT;

    g_uacpi_rt_ctx.global_lock_acquired = UACPI_FALSE;
    uacpi_release_global_lock_to_firmware();
    UACPI_MUTEX_RELEASE(g_uacpi_rt_ctx.global_lock_mutex);

    return UACPI_STATUS_OK;
}

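// Check whether the calling thread currently owns the given AML mutex.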
uacpi_bool uacpi_this_thread_owns_aml_mutex(uacpi_mutex *mutex)
{
    uacpi_thread_id id;

    id = UACPI_ATOMIC_LOAD_THREAD_ID(&mutex->owner);
    return id == uacpi_kernel_get_thread_id();
}

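/*
 * Acquire a mutex on behalf of AML. Recursive acquisition by the owning
 * thread only bumps the depth counter (capped at 0xFFFF). If this mutex is
 * backed by the global lock mutex, the firmware half of the global lock is
 * acquired as well.
 */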
uacpi_bool uacpi_acquire_aml_mutex(uacpi_mutex *mutex, uacpi_u16 timeout)
{
    uacpi_thread_id this_id;
    uacpi_bool did_acquire;

    this_id = uacpi_kernel_get_thread_id();
    if (UACPI_ATOMIC_LOAD_THREAD_ID(&mutex->owner) == this_id) {
        if (uacpi_unlikely(mutex->depth == 0xFFFF)) {
            uacpi_warn(
                "failing an attempt to acquire mutex @%p, too many recursive "
                "acquires\n", mutex
            );
            return UACPI_FALSE;
        }

        mutex->depth++;
        return UACPI_TRUE;
    }

    UACPI_MUTEX_ACQUIRE_WITH_TIMEOUT(mutex->handle, timeout, did_acquire);
    if (!did_acquire)
        return UACPI_FALSE;

    if (mutex->handle == g_uacpi_rt_ctx.global_lock_mutex) {
        uacpi_status ret;

        ret = uacpi_acquire_global_lock_from_firmware();
        if (uacpi_unlikely_error(ret)) {
            UACPI_MUTEX_RELEASE(mutex->handle);
            return UACPI_FALSE;
        }
    }

    UACPI_ATOMIC_STORE_THREAD_ID(&mutex->owner, this_id);
    mutex->depth = 1;
    return UACPI_TRUE;
}

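/*
 * Release a mutex acquired via uacpi_acquire_aml_mutex(). Only the outermost
 * release (depth reaching zero) clears the owner and unlocks the underlying
 * handle; the global lock is handed back to firmware first if this mutex is
 * backed by the global lock mutex.
 */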
void uacpi_release_aml_mutex(uacpi_mutex *mutex)
{
    if (mutex->depth-- > 1)
        return;

    if (mutex->handle == g_uacpi_rt_ctx.global_lock_mutex)
        uacpi_release_global_lock_to_firmware();

    UACPI_ATOMIC_STORE_THREAD_ID(&mutex->owner, UACPI_THREAD_ID_NONE);
    UACPI_MUTEX_RELEASE(mutex->handle);
}