static tid_t next = 1;
return (tid_t) { next++ };
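Together with the new_thread_id() declaration in the index below, these two fragments read as a bare monotonic counter; a minimal reconstruction (assuming callers serialise access, since nothing here is atomic):

    static tid_t new_thread_id(void)
    {
        static tid_t next = 1;     // tid 0 is never handed out
        return (tid_t) { next++ }; // no overflow or reuse handling
    }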
pr_emerg("thread %p destroyed", this);
pr_dinfo2(thread, "destroying thread %pt", thread);
const auto owner = thread->owner;
pr_dinfo2(thread, "creating new thread %pt, owner=%pp", t, owner);
if (!explicit_stack_top)
if (stack_vmap.isErr())
{
    pr_emerg("failed to allocate stack for new thread");
    return stack_vmap.getErr();
}
stack_init(&t->u_stack, (void *) stack_vmap->vaddr, user_stack_size);
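Assembled from the fragments above and the cow_allocate_zeroed_pages() signature in the index below, the default-stack path plausibly reads as follows; owner->mm, the address hint, and the vm_flags value are all assumptions:

    if (!explicit_stack_top)
    {
        // Zero-on-demand pages for the stack; the flags below are hypothetical.
        auto stack_vmap = cow_allocate_zeroed_pages(owner->mm, MOS_STACK_PAGES_USER,
                                                    MOS_ADDR_USER_STACK, VALLOC_DEFAULT,
                                                    VM_USER | VM_RW);
        if (stack_vmap.isErr())
        {
            pr_emerg("failed to allocate stack for new thread");
            return stack_vmap.getErr();
        }
        stack_init(&t->u_stack, (void *) stack_vmap->vaddr, user_stack_size);
    }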
pr_warn("invalid stack pointer %pt", explicit_stack_top);
pr_warn("stack %pt has been claimed by another thread", explicit_stack_top);
pr_warn("stack %pt is too small (size=%zu, required=%zu)", explicit_stack_top, stack_vmap->npages * MOS_PAGE_SIZE, user_stack_size);
pr_warn("stack %pt is not writable", explicit_stack_top);
stack_init(&t->u_stack, (void *) stack_bottom, user_stack_size);
t->u_stack.head = (ptr_t) explicit_stack_top;
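The four warnings above form a validation ladder for a caller-supplied stack; a sketch of that path, in which the vmap lookup details, the ‘claimed’ test, the field and flag names, and the error returns are hypothetical:

    auto stack_vmap = vmap_obtain(owner->mm, (ptr_t) explicit_stack_top, nullptr);
    if (!stack_vmap)
    {
        pr_warn("invalid stack pointer %pt", explicit_stack_top);
        return -EINVAL; // hypothetical error value
    }
    if (stack_vmap->content == VMAP_STACK) // hypothetical: already claimed as a stack
    {
        pr_warn("stack %pt has been claimed by another thread", explicit_stack_top);
        return -EINVAL;
    }
    if (stack_vmap->npages * MOS_PAGE_SIZE < user_stack_size)
    {
        pr_warn("stack %pt is too small (size=%zu, required=%zu)", explicit_stack_top,
                stack_vmap->npages * MOS_PAGE_SIZE, user_stack_size);
        return -EINVAL;
    }
    if (!(stack_vmap->vmflags & VM_WRITE)) // hypothetical field/flag names
    {
        pr_warn("stack %pt is not writable", explicit_stack_top);
        return -EINVAL;
    }
    const ptr_t stack_bottom = stack_vmap->vaddr;
    stack_init(&t->u_stack, (void *) stack_bottom, user_stack_size);
    t->u_stack.head = (ptr_t) explicit_stack_top;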
if (ppthread == nullptr)
pr_warn("wait_for_tid(%d) from process %pp but thread belongs to %pp", tid, current_process, target->owner);
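This warning sits in thread_wait_for_tid() per the index; a sketch of the ownership check plus the blocking step, assuming a hypothetical waiters list on Thread and the enqueue-then-reschedule protocol implied by reschedule_for_waitlist():

    bool thread_wait_for_tid(tid_t tid)
    {
        Thread *target = thread_get(tid);
        if (target == nullptr)
            return false; // return values here are assumptions
        if (target->owner != current_process)
        {
            pr_warn("wait_for_tid(%d) from process %pp but thread belongs to %pp", tid,
                    current_process, target->owner);
            return false;
        }
        // If the list is already closed, the thread has exited: nothing to wait for.
        if (reschedule_for_waitlist(&target->waiters)) // 'waiters' is hypothetical
            reschedule(); // yield until waitlist_wake()/waitlist_close() runs
        return true;
    }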
pr_dinfo(thread, "thread %pt is exiting", t);
#define MOS_ASSERT_X(cond, msg,...)
#define MOS_UNREACHABLE()
#define MOS_STACK_PAGES_USER
#define MOS_ADDR_USER_STACK
#define MOS_STACK_PAGES_KERNEL
void push_back(const T &value)
PtrResult< vmap_t > cow_allocate_zeroed_pages(MMContext *handle, size_t npages, ptr_t vaddr, valloc_flags hints, vm_flags flags)
Allocate zero-on-demand pages at a specific address.
MOSAPI void stack_init(downwards_stack_t *stack, void *mem_region_bottom, size_t size)
MOSAPI void linked_list_init(list_node_t *head_node)
Initialise a circular doubly-linked list.
#define list_node(element)
Get the ‘list_node’ of a list element; the exact reverse of ‘list_entry’.
#define phyframe_va(frame)
void mm_lock_ctx_pair(MMContext *ctx1, MMContext *ctx2)
Lock a pair of MMContext objects; released by mm_unlock_ctx_pair below.
void vmap_finalise_init(vmap_t *vmap, vmap_content_t content, vmap_type_t type)
Finalize the initialization of a vmap object.
vmap_t * vmap_obtain(MMContext *mmctx, ptr_t vaddr, size_t *out_offset)
Get the vmap object for a virtual address.
vmap_t * vmap_split(vmap_t *vmap, size_t split)
Split a vmap object into two at the specified offset.
#define mm_free_pages(frame, npages)
void mm_unlock_ctx_pair(MMContext *ctx1, MMContext *ctx2)
phyframe_t * mm_get_free_pages(size_t npages)
void vmap_destroy(vmap_t *vmap)
Destroy a vmap object, and unmap the region.
VALLOC_DEFAULT
Default allocation flags.
#define THREAD_MAGIC_THRD
void define_syscall thread_exit(void)
#define ALIGN_UP_TO_PAGE(addr)
basic_string_view< char > string_view
T * create(Args &&...args)
#define pr_emerg(fmt,...)
#define pr_dinfo(feat, fmt,...)
#define pr_dinfo2(feat, fmt,...)
__nodiscard bool reschedule_for_waitlist(waitlist_t *waitlist)
void reschedule(void)
Yield the CPU so the scheduler can pick the next runnable thread.
should_inline bool spinlock_is_locked(const spinlock_t *lock)
#define spinlock_acquire(lock)
#define spinlock_release(lock)
mos::list< Thread * > thread_list
thread_mode mode
user-mode or kernel-mode thread
downwards_stack_t u_stack
user-mode stack
downwards_stack_t k_stack
kernel-mode stack
spinlock_t state_lock
protects the thread state
thread_state_t state
thread state
PtrResult< Thread > thread_new(Process *owner, thread_mode tmode, mos::string_view name, size_t stack_size, void *explicit_stack_top)
void thread_destroy(Thread *thread)
Thread * thread_allocate(Process *owner, thread_mode tflags)
bool thread_wait_for_tid(tid_t tid)
void thread_exit_locked(Thread *&&t)
Thread * thread_get(tid_t tid)
Thread * thread_complete_init(Thread *thread)
static tid_t new_thread_id(void)
should_inline bool thread_is_valid(const Thread *thread)
mos::HashMap< tid_t, Thread * > thread_table
void waitlist_init(waitlist_t *list)
size_t waitlist_wake(waitlist_t *list, size_t max_wakeups)
void waitlist_close(waitlist_t *list)
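The waitlist trio at the end composes into a block/wake protocol; a minimal sketch, assuming reschedule_for_waitlist() enqueues the current thread and returns false once the list has been closed:

    waitlist_t list;
    waitlist_init(&list);

    // Sleeper: enqueue, then yield; reschedule() returns after a wake-up.
    if (reschedule_for_waitlist(&list))
        reschedule();

    // Waker: wake every sleeper, then close the list so late arrivals
    // observe the event instead of blocking forever.
    waitlist_wake(&list, SIZE_MAX);
    waitlist_close(&list);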