27 static tid_t next = 1;
28 return (
tid_t){ next++ };
54 pr_dinfo2(thread,
"destroying thread %pt", (
void *) thread);
83 pr_dinfo2(thread,
"creating new thread %pt, owner=%pp", (
void *) t, (
void *) owner);
97 if (!explicit_stack_top)
110 pr_warn(
"invalid stack pointer %pt", explicit_stack_top);
117 pr_warn(
"stack %pt has been claimed by another thread", explicit_stack_top);
124 pr_warn(
"stack %pt is too small (size=%zu, required=%zu)", explicit_stack_top, stack_vmap->
npages *
MOS_PAGE_SIZE, user_stack_size);
131 pr_warn(
"stack %pt is not writable", explicit_stack_top);
153 return ERR_PTR(-EFAULT);
207 pr_dinfo(thread,
"thread %pt is exiting", (
void *) t);
#define MOS_ASSERT_X(cond, msg,...)
#define MOS_UNREACHABLE()
#define MOS_STACK_PAGES_USER
#define MOS_ADDR_USER_STACK
#define MOS_STACK_PAGES_KERNEL
vmap_t * cow_allocate_zeroed_pages(mm_context_t *handle, size_t npages, ptr_t vaddr, valloc_flags hints, vm_flags flags)
Allocate zero-on-demand pages at a specific address.
MOSAPI void stack_init(downwards_stack_t *stack, void *mem_region_bottom, size_t size)
MOSAPI void * hashmap_get(hashmap_t *map, uintn key)
MOSAPI void * hashmap_put(hashmap_t *map, uintn key, void *value)
MOSAPI void * hashmap_remove(hashmap_t *map, uintn key)
MOSAPI char * strdup(const char *src)
MOSAPI void linked_list_init(list_node_t *head_node)
Initialise a circular doubly linked list.
MOSAPI void list_node_append(list_node_t *head, list_node_t *item)
#define list_node(element)
Get the `list_node` of a list element. This is exactly the reverse of `list_entry` above.
#define phyframe_va(frame)
void vmap_finalise_init(vmap_t *vmap, vmap_content_t content, vmap_type_t type)
Finalize the initialization of a vmap object.
vmap_t * vmap_obtain(mm_context_t *mmctx, ptr_t vaddr, size_t *out_offset)
Get the vmap object for a virtual address.
vmap_t * vmap_split(vmap_t *vmap, size_t split)
Split a vmap object into two, at the specified offset.
#define mm_free_pages(frame, npages)
void mm_unlock_ctx_pair(mm_context_t *ctx1, mm_context_t *ctx2)
phyframe_t * mm_get_free_pages(size_t npages)
void vmap_destroy(vmap_t *vmap)
Destroy a vmap object, and unmap the region.
void mm_lock_ctx_pair(mm_context_t *ctx1, mm_context_t *ctx2)
Lock and unlock a pair of mm_context_t objects.
@ VALLOC_DEFAULT
Default allocation flags.
void define_syscall thread_exit(void)
#define ALIGN_UP_TO_PAGE(addr)
#define pr_dinfo(feat, fmt,...)
#define pr_dinfo2(feat, fmt,...)
__nodiscard bool reschedule_for_waitlist(waitlist_t *waitlist)
void reschedule(void)
reschedule.
should_inline bool spinlock_is_locked(const spinlock_t *lock)
#define spinlock_acquire(lock)
#define spinlock_release(lock)
spinlock_t mm_lock
protects [pgd] and the [mmaps] list (the list itself, not the vmap_t objects)
list_head pending
list of pending signals
thread_mode mode
user-mode thread or kernel-mode
downwards_stack_t u_stack
user-mode stack
waitlist_t waiters
list of threads waiting for this thread to exit
downwards_stack_t k_stack
kernel-mode stack
spinlock_t state_lock
protects the thread state
thread_state_t state
thread state
thread_signal_info_t signal_info
void thread_destroy(thread_t *thread)
thread_t * thread_get(tid_t tid)
thread_t * thread_new(process_t *owner, thread_mode tmode, const char *name, size_t stack_size, void *explicit_stack_top)
bool thread_wait_for_tid(tid_t tid)
thread_t * thread_allocate(process_t *owner, thread_mode tflags)
void thread_exit_locked(thread_t *t)
thread_t * thread_complete_init(thread_t *thread)
static tid_t new_thread_id(void)
#define THREAD_MAGIC_THRD
should_inline bool thread_is_valid(const thread_t *thread)
size_t waitlist_wake(waitlist_t *list, size_t max_wakeups)
__BEGIN_DECLS void waitlist_init(waitlist_t *list)
void waitlist_close(waitlist_t *list)