Ruby 3.4.4p34 (2025-05-14 revision a38531fd3f617bf734ef7d6c595325f69985ea1d)
thread.c
/**********************************************************************

  thread.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
  YARV Thread Design

  model 1: Userlevel Thread
    Same as traditional ruby thread.

  model 2: Native Thread with Global VM lock
    Uses pthreads (or Windows threads); Ruby threads run concurrently.

  model 3: Native Thread with fine grain lock
    Uses pthreads; Ruby threads run concurrently or in parallel.

  model 4: M:N User:Native threads with Global VM lock
    Combination of models 1 and 2.

  model 5: M:N User:Native thread with fine grain lock
    Combination of models 1 and 3.

------------------------------------------------------------------------

  model 2:
    Only a thread that holds the mutex (GVL: Global VM Lock, or Giant
    VM Lock) can run.  On thread scheduling, the running thread releases
    the GVL.  If the running thread attempts a blocking operation, it
    must release the GVL so that another thread can continue.  After the
    blocking operation, the thread must check interrupts
    (RUBY_VM_CHECK_INTS).

    Every VM can run in parallel.

    Ruby threads are scheduled by the OS thread scheduler.

------------------------------------------------------------------------

  model 3:
    Every thread runs concurrently or in parallel, and exclusive access
    control is needed for shared objects.  For example, a fine grain
    lock must be taken on every access to a String or Array object.
 */
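
/*
 * A minimal sketch of the model 2 discipline described above, for
 * orientation only (the real mechanics live in BLOCKING_REGION and the
 * thread_sched_* functions defined later in this file).
 * release_gvl()/acquire_gvl() are placeholder names, not real functions:
 *
 *     // running with the GVL held
 *     release_gvl();             // let other Ruby threads run
 *     r = read(fd, buf, len);    // blocking operation; no Ruby API here
 *     acquire_gvl();             // serialize with the VM again
 *     RUBY_VM_CHECK_INTS(ec);    // handle Thread#kill, signals, etc.
 */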

/*
 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
 * 2.15 or later with _FORTIFY_SOURCE > 0.
 * However, the implementation is wrong. Even though Linux's select(2)
 * supports large fd sizes (> FD_SETSIZE), it wrongly assumes fd is always
 * less than FD_SETSIZE (i.e. 1024). And then, when HAVE_RB_FD_INIT is
 * enabled, it doesn't work correctly and makes the program abort. Therefore
 * we need to disable FORTIFY_SOURCE until glibc fixes it.
 */
#undef _FORTIFY_SOURCE
#undef __USE_FORTIFY_LEVEL
#define __USE_FORTIFY_LEVEL 0

/* for model 2 */

#include "ruby/internal/config.h"

#ifdef __linux__
// Normally, gcc(1) translates calls to alloca() with inlined code. This is
// not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option
// is given and the header <alloca.h> is not included.
# include <alloca.h>
#endif

#define TH_SCHED(th) (&(th)->ractor->threads.sched)

#include "eval_intern.h"
#include "hrtime.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/io.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "internal/signal.h"
#include "internal/thread.h"
#include "internal/time.h"
#include "internal/warnings.h"
#include "iseq.h"
#include "rjit.h"
#include "ruby/debug.h"
#include "ruby/io.h"
#include "ruby/thread.h"
#include "ruby/thread_native.h"
#include "timev.h"
#include "vm_core.h"
#include "ractor_core.h"
#include "vm_debug.h"
#include "vm_sync.h"

#if USE_RJIT && defined(HAVE_SYS_WAIT_H)
#include <sys/wait.h>
#endif

#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
#define RUBY_THREAD_PRIORITY_MAX 3
#define RUBY_THREAD_PRIORITY_MIN -3
#endif

static VALUE rb_cThreadShield;

static VALUE sym_immediate;
static VALUE sym_on_blocking;
static VALUE sym_never;

static uint32_t thread_default_quantum_ms = 100;

#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)

static inline VALUE
rb_thread_local_storage(VALUE thread)
{
    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
        rb_ivar_set(thread, idLocals, rb_hash_new());
        RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
    }
    return rb_ivar_get(thread, idLocals);
}

enum SLEEP_FLAGS {
    SLEEP_DEADLOCKABLE   = 0x01,
    SLEEP_SPURIOUS_CHECK = 0x02,
    SLEEP_ALLOW_SPURIOUS = 0x04,
    SLEEP_NO_CHECKINTS   = 0x08,
};

static void sleep_forever(rb_thread_t *th, unsigned int fl);
static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);

static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
static int rb_threadptr_dead(rb_thread_t *th);
static void rb_check_deadlock(rb_ractor_t *r);
static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
static const char *thread_status_name(rb_thread_t *th, int detail);
static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
MAYBE_UNUSED(static int consume_communication_pipe(int fd));

static volatile int system_working = 1;
static rb_internal_thread_specific_key_t specific_key_count;

struct waiting_fd {
    struct ccan_list_node wfd_node; /* <=> vm.waiting_fds */
    rb_thread_t *th;
    int fd;
    struct rb_io_close_wait_list *busy;
};

/********************************************************************************/

#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

struct rb_blocking_region_buffer {
    enum rb_thread_status prev_status;
};

static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
static void unblock_function_clear(rb_thread_t *th);

static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                                        rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);

#define THREAD_BLOCKING_BEGIN(th) do { \
  struct rb_thread_sched * const sched = TH_SCHED(th); \
  RB_VM_SAVE_MACHINE_CONTEXT(th); \
  thread_sched_to_waiting((sched), (th));

#define THREAD_BLOCKING_END(th) \
  thread_sched_to_running((sched), (th)); \
  rb_ractor_thread_switch(th->ractor, th); \
} while(0)

#ifdef __GNUC__
#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
#else
#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
#endif
#else
#define only_if_constant(expr, notconst) notconst
#endif
#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
    struct rb_blocking_region_buffer __region; \
    if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
        /* always return true unless fail_if_interrupted */ \
        !only_if_constant(fail_if_interrupted, TRUE)) { \
        /* Important that this is inlined into the macro, and not part of \
         * blocking_region_begin - see bug #20493 */ \
        RB_VM_SAVE_MACHINE_CONTEXT(th); \
        thread_sched_to_waiting(TH_SCHED(th), th); \
        exec; \
        blocking_region_end(th, &__region); \
    }; \
} while(0)
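
/*
 * Usage sketch for BLOCKING_REGION (hypothetical caller; rb_nogvl() below
 * shows the real pattern).  The `exec` statement runs with the GVL
 * released, so it must not touch Ruby objects; ubf_select is the
 * unblocking function that wakes the thread back up:
 *
 *     BLOCKING_REGION(th, {
 *         r = read(fd, buf, len);   // may block without the GVL
 *     }, ubf_select, th, FALSE);
 */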

/*
 * Returns true if this thread was spuriously interrupted, false otherwise
 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler).
 */
#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
static inline int
vm_check_ints_blocking(rb_execution_context_t *ec)
{
    rb_thread_t *th = rb_ec_thread_ptr(ec);

    if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
        if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
    }
    else {
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(ec);
    }
    return rb_threadptr_execute_interrupts(th, 1);
}

int
rb_vm_check_ints_blocking(rb_execution_context_t *ec)
{
    return vm_check_ints_blocking(ec);
}

/*
 * poll() is supported by many OSes, but so far Linux is the only
 * one we know of that supports using poll() in all places select()
 * would work.
 */
#if defined(HAVE_POLL)
# if defined(__linux__)
#  define USE_POLL
# endif
# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
#  define USE_POLL
   /* FreeBSD does not set POLLOUT when POLLHUP happens */
#  define POLLERR_SET (POLLHUP | POLLERR)
# endif
#endif

static void
timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
                const struct timeval *timeout)
{
    if (timeout) {
        *rel = rb_timeval2hrtime(timeout);
        *end = rb_hrtime_add(rb_hrtime_now(), *rel);
        *to = rel;
    }
    else {
        *to = 0;
    }
}
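
/*
 * Usage sketch (hypothetical caller): `to`, `rel` and `end` live on the
 * caller's stack, and a NULL `to` means "wait indefinitely".  On wakeup,
 * hrtime_update_expire() (defined below) refreshes the remaining time:
 *
 *     rb_hrtime_t rel, end, *to;
 *     timeout_prepare(&to, &rel, &end, timeout);
 *     do {
 *         // ... wait for up to *to ...
 *     } while (!(to && hrtime_update_expire(to, end)));
 */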

MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
MAYBE_UNUSED(static bool th_has_dedicated_nt(const rb_thread_t *th));
MAYBE_UNUSED(static int waitfd_to_waiting_flag(int wfd_event));

#include THREAD_IMPL_SRC

/*
 * TODO: somebody with win32 knowledge should be able to get rid of
 * the timer-thread by busy-waiting on signals. And it should be possible
 * to make the GVL in thread_pthread.c platform-independent.
 */
#ifndef BUSY_WAIT_SIGNALS
# define BUSY_WAIT_SIGNALS (0)
#endif

#ifndef USE_EVENTFD
# define USE_EVENTFD (0)
#endif

#include "thread_sync.c"

void
rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_initialize(lock);
}

void
rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_destroy(lock);
}

void
rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_lock(lock);
}

void
rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_unlock(lock);
}

static int
unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
{
    do {
        if (fail_if_interrupted) {
            if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
                return FALSE;
            }
        }
        else {
            RUBY_VM_CHECK_INTS(th->ec);
        }

        rb_native_mutex_lock(&th->interrupt_lock);
    } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
             (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));

    VM_ASSERT(th->unblock.func == NULL);

    th->unblock.func = func;
    th->unblock.arg = arg;
    rb_native_mutex_unlock(&th->interrupt_lock);

    return TRUE;
}

static void
unblock_function_clear(rb_thread_t *th)
{
    rb_native_mutex_lock(&th->interrupt_lock);
    th->unblock.func = 0;
    rb_native_mutex_unlock(&th->interrupt_lock);
}

static void
threadptr_interrupt_locked(rb_thread_t *th, bool trap)
{
    // th->interrupt_lock should be acquired here

    RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);

    if (trap) {
        RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
    }
    else {
        RUBY_VM_SET_INTERRUPT(th->ec);
    }

    if (th->unblock.func != NULL) {
        (th->unblock.func)(th->unblock.arg);
    }
    else {
        /* none */
    }
}

static void
threadptr_interrupt(rb_thread_t *th, int trap)
{
    rb_native_mutex_lock(&th->interrupt_lock);
    {
        threadptr_interrupt_locked(th, trap);
    }
    rb_native_mutex_unlock(&th->interrupt_lock);
}

void
rb_threadptr_interrupt(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    threadptr_interrupt(th, false);
}

static void
threadptr_trap_interrupt(rb_thread_t *th)
{
    threadptr_interrupt(th, true);
}

static void
terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
{
    rb_thread_t *th = 0;

    ccan_list_for_each(&r->threads.set, th, lt_node) {
        if (th != main_thread) {
            RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));

            rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
            rb_threadptr_interrupt(th);

            RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
        }
        else {
            RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
        }
    }
}

static void
rb_threadptr_join_list_wakeup(rb_thread_t *thread)
{
    while (thread->join_list) {
        struct rb_waiting_list *join_list = thread->join_list;

        // Consume the entry from the join list:
        thread->join_list = join_list->next;

        rb_thread_t *target_thread = join_list->thread;

        if (target_thread->scheduler != Qnil && join_list->fiber) {
            rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
        }
        else {
            rb_threadptr_interrupt(target_thread);

            switch (target_thread->status) {
              case THREAD_STOPPED:
              case THREAD_STOPPED_FOREVER:
                target_thread->status = THREAD_RUNNABLE;
                break;
              default:
                break;
            }
        }
    }
}

void
rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
{
    while (th->keeping_mutexes) {
        rb_mutex_t *mutex = th->keeping_mutexes;
        th->keeping_mutexes = mutex->next_mutex;

        // rb_warn("mutex #<%p> was not unlocked by thread #<%p>", (void *)mutex, (void*)th);

        const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
        if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
    }
}

void
rb_thread_terminate_all(rb_thread_t *th)
{
    rb_ractor_t *cr = th->ractor;
    rb_execution_context_t * volatile ec = th->ec;
    volatile int sleeping = 0;

    if (cr->threads.main != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)cr->threads.main, (void *)th);
    }

    /* unlock all locking mutexes */
    rb_threadptr_unlock_all_locking_mutexes(th);

    EC_PUSH_TAG(ec);
    if (EC_EXEC_TAG() == TAG_NONE) {
      retry:
        RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

        terminate_all(cr, th);

        while (rb_ractor_living_thread_num(cr) > 1) {
            rb_hrtime_t rel = RB_HRTIME_PER_SEC;
            /*
             * The thread exiting routine in thread_start_func_2 notifies
             * me when the last sub-thread exits.
             */
            sleeping = 1;
            native_sleep(th, &rel);
            RUBY_VM_CHECK_INTS_BLOCKING(ec);
            sleeping = 0;
        }
    }
    else {
        /*
         * When we caught an exception (e.g. Ctrl+C), let's broadcast the
         * kill request again to ensure killing all threads, even
         * if they are blocked on sleep, mutex, etc.
         */
        if (sleeping) {
            sleeping = 0;
            goto retry;
        }
    }
    EC_POP_TAG();
}

void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
static void threadptr_interrupt_exec_cleanup(rb_thread_t *th);

static void
thread_cleanup_func_before_exec(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    th->status = THREAD_KILLED;

    // The thread stack doesn't exist in the forked process:
    th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;

    threadptr_interrupt_exec_cleanup(th);
    rb_threadptr_root_fiber_terminate(th);
}

static void
thread_cleanup_func(void *th_ptr, int atfork)
{
    rb_thread_t *th = th_ptr;

    th->locking_mutex = Qfalse;
    thread_cleanup_func_before_exec(th_ptr);

    /*
     * Unfortunately, we can't release native threading resources at fork,
     * because libc may be in an unstable locking state, and therefore
     * touching a threading resource may cause a deadlock.
     */
    if (atfork) {
        th->nt = NULL;
        return;
    }

    rb_native_mutex_destroy(&th->interrupt_lock);
}

static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
static VALUE rb_thread_to_s(VALUE thread);

void
ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
{
    native_thread_init_stack(th, local_in_parent_frame);
}

const VALUE *
rb_vm_proc_local_ep(VALUE proc)
{
    const VALUE *ep = vm_proc_ep(proc);

    if (ep) {
        return rb_vm_ep_local_ep(ep);
    }
    else {
        return NULL;
    }
}

// for ractor, defined in vm.c
VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
                                  int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);

static VALUE
thread_do_start_proc(rb_thread_t *th)
{
    VALUE args = th->invoke_arg.proc.args;
    const VALUE *args_ptr;
    int args_len;
    VALUE procval = th->invoke_arg.proc.proc;
    rb_proc_t *proc;
    GetProcPtr(procval, proc);

    th->ec->errinfo = Qnil;
    th->ec->root_lep = rb_vm_proc_local_ep(procval);
    th->ec->root_svar = Qfalse;

    vm_check_ints_blocking(th->ec);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        VALUE self = rb_ractor_self(th->ractor);
        VM_ASSERT(FIXNUM_P(args));
        args_len = FIX2INT(args);
        args_ptr = ALLOCA_N(VALUE, args_len);
        rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc_with_self(
            th->ec, proc, self,
            args_len, args_ptr,
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        );
    }
    else {
        args_len = RARRAY_LENINT(args);
        if (args_len < 8) {
            /* free proc.args if the length is small enough */
            args_ptr = ALLOCA_N(VALUE, args_len);
            MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR(args), VALUE, args_len);
            th->invoke_arg.proc.args = Qnil;
        }
        else {
            args_ptr = RARRAY_CONST_PTR(args);
        }

        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc(
            th->ec, proc,
            args_len, args_ptr,
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        );
    }
}

static VALUE
thread_do_start(rb_thread_t *th)
{
    native_set_thread_name(th);
    VALUE result = Qundef;

    switch (th->invoke_type) {
      case thread_invoke_type_proc:
        result = thread_do_start_proc(th);
        break;

      case thread_invoke_type_ractor_proc:
        result = thread_do_start_proc(th);
        rb_ractor_atexit(th->ec, result);
        break;

      case thread_invoke_type_func:
        result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
        break;

      case thread_invoke_type_none:
        rb_bug("unreachable");
    }

    return result;
}

void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);

static int
thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    VM_ASSERT(th != th->vm->ractor.main_thread);

    enum ruby_tag_type state;
    VALUE errinfo = Qnil;
    rb_thread_t *ractor_main_th = th->ractor->threads.main;

    // setup ractor
    if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
        RB_VM_LOCK();
        {
            rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
            rb_ractor_t *r = th->ractor;
            r->r_stdin = rb_io_prep_stdin();
            r->r_stdout = rb_io_prep_stdout();
            r->r_stderr = rb_io_prep_stderr();
        }
        RB_VM_UNLOCK();
    }

    // Ensure that we are not joinable.
    VM_ASSERT(UNDEF_P(th->value));

    int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
    VALUE result = Qundef;

    EC_PUSH_TAG(th->ec);

    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);

        result = thread_do_start(th);
    }

    if (!fiber_scheduler_closed) {
        fiber_scheduler_closed = 1;
        rb_fiber_scheduler_set(Qnil);
    }

    if (!event_thread_end_hooked) {
        event_thread_end_hooked = 1;
        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
    }

    if (state == TAG_NONE) {
        // This must be set AFTER doing all user-level code. At this point, the thread is effectively finished and calls to `Thread#join` will succeed.
        th->value = result;
    }
    else {
        errinfo = th->ec->errinfo;

        VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
        if (!NIL_P(exc)) errinfo = exc;

        if (state == TAG_FATAL) {
            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit(th->ec, Qnil);
            }
            /* fatal error within this thread, need to stop whole script */
        }
        else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
            /* exit on main_thread. */
        }
        else {
            if (th->report_on_exception) {
                VALUE mesg = rb_thread_to_s(th->self);
                rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
                rb_write_error_str(mesg);
                rb_ec_error_print(th->ec, errinfo);
            }

            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit_exception(th->ec);
            }

            if (th->vm->thread_abort_on_exception ||
                th->abort_on_exception || RTEST(ruby_debug)) {
                /* exit on main_thread */
            }
            else {
                errinfo = Qnil;
            }
        }
        th->value = Qnil;
    }

    // The thread is effectively finished and can be joined.
    VM_ASSERT(!UNDEF_P(th->value));

    rb_threadptr_join_list_wakeup(th);
    rb_threadptr_unlock_all_locking_mutexes(th);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        rb_thread_terminate_all(th);
        rb_ractor_teardown(th->ec);
    }

    th->status = THREAD_KILLED;
    RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));

    if (th->vm->ractor.main_thread == th) {
        ruby_stop(0);
    }

    if (RB_TYPE_P(errinfo, T_OBJECT)) {
        /* treat with normal error object */
        rb_threadptr_raise(ractor_main_th, 1, &errinfo);
    }

    EC_POP_TAG();

    rb_ec_clear_current_thread_trace_func(th->ec);

    /* locking_mutex must be Qfalse */
    if (th->locking_mutex != Qfalse) {
        rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
               (void *)th, th->locking_mutex);
    }

    if (ractor_main_th->status == THREAD_KILLED &&
        th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
        /* I'm the last thread. Wake up the main thread from rb_thread_terminate_all. */
        rb_threadptr_interrupt(ractor_main_th);
    }

    rb_check_deadlock(th->ractor);

    rb_fiber_close(th->ec->fiber_ptr);

    thread_cleanup_func(th, FALSE);
    VM_ASSERT(th->ec->vm_stack == NULL);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        // After rb_ractor_living_threads_remove(), GC can happen anytime and
        // this ractor can be collected (destroying the GVL), so gvl_release()
        // should come before it.
        thread_sched_to_dead(TH_SCHED(th), th);
        rb_ractor_living_threads_remove(th->ractor, th);
    }
    else {
        rb_ractor_living_threads_remove(th->ractor, th);
        thread_sched_to_dead(TH_SCHED(th), th);
    }

    return 0;
}

struct thread_create_params {
    enum thread_invoke_type type;

    // for normal proc thread
    VALUE args;
    VALUE proc;

    // for ractor
    rb_ractor_t *g;

    // for func
    VALUE (*fn)(void *);
};

static void thread_specific_storage_alloc(rb_thread_t *th);

static VALUE
thread_create_core(VALUE thval, struct thread_create_params *params)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
    int err;

    thread_specific_storage_alloc(th);

    if (OBJ_FROZEN(current_th->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    }

    rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);

    switch (params->type) {
      case thread_invoke_type_proc:
        th->invoke_type = thread_invoke_type_proc;
        th->invoke_arg.proc.args = params->args;
        th->invoke_arg.proc.proc = params->proc;
        th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
        break;

      case thread_invoke_type_ractor_proc:
#if RACTOR_CHECK_MODE > 0
        rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
#endif
        th->invoke_type = thread_invoke_type_ractor_proc;
        th->ractor = params->g;
        th->ractor->threads.main = th;
        th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
        th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
        th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
        rb_ractor_send_parameters(ec, params->g, params->args);
        break;

      case thread_invoke_type_func:
        th->invoke_type = thread_invoke_type_func;
        th->invoke_arg.func.func = params->fn;
        th->invoke_arg.func.arg = (void *)params->args;
        break;

      default:
        rb_bug("unreachable");
    }

    th->priority = current_th->priority;
    th->thgroup = current_th->thgroup;

    th->pending_interrupt_queue = rb_ary_hidden_new(0);
    th->pending_interrupt_queue_checked = 0;
    th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
    RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);

    rb_native_mutex_initialize(&th->interrupt_lock);

    RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));

    rb_ractor_living_threads_insert(th->ractor, th);

    /* kick thread */
    err = native_thread_create(th);
    if (err) {
        th->status = THREAD_KILLED;
        rb_ractor_living_threads_remove(th->ractor, th);
        rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
    }
    return thval;
}

#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)

/*
 * call-seq:
 *    Thread.new { ... } -> thread
 *    Thread.new(*args, &proc) -> thread
 *    Thread.new(*args) { |args| ... } -> thread
 *
 *  Creates a new thread executing the given block.
 *
 *  Any +args+ given to ::new will be passed to the block:
 *
 *     arr = []
 *     a, b, c = 1, 2, 3
 *     Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
 *     arr #=> [1, 2, 3]
 *
 *  A ThreadError exception is raised if ::new is called without a block.
 *
 *  If you're going to subclass Thread, be sure to call super in your
 *  +initialize+ method, otherwise a ThreadError will be raised.
 */
static VALUE
thread_s_new(int argc, VALUE *argv, VALUE klass)
{
    rb_thread_t *th;
    VALUE thread = rb_thread_alloc(klass);

    if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
        rb_raise(rb_eThreadError, "can't alloc thread");
    }

    rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
    th = rb_thread_ptr(thread);
    if (!threadptr_initialized(th)) {
        rb_raise(rb_eThreadError, "uninitialized thread - check '%"PRIsVALUE"#initialize'",
                 klass);
    }
    return thread;
}

/*
 * call-seq:
 *   Thread.start([args]*) {|args| block } -> thread
 *   Thread.fork([args]*) {|args| block } -> thread
 *
 * Basically the same as ::new. However, if class Thread is subclassed, then
 * calling +start+ in that subclass will not invoke the subclass's
 * +initialize+ method.
 */

static VALUE
thread_start(VALUE klass, VALUE args)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_proc,
        .args = args,
        .proc = rb_block_proc(),
    };
    return thread_create_core(rb_thread_alloc(klass), &params);
}

static VALUE
threadptr_invoke_proc_location(rb_thread_t *th)
{
    if (th->invoke_type == thread_invoke_type_proc) {
        return rb_proc_location(th->invoke_arg.proc.proc);
    }
    else {
        return Qnil;
    }
}

/* :nodoc: */
static VALUE
thread_initialize(VALUE thread, VALUE args)
{
    rb_thread_t *th = rb_thread_ptr(thread);

    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }
    else if (th->invoke_type != thread_invoke_type_none) {
        VALUE loc = threadptr_invoke_proc_location(th);
        if (!NIL_P(loc)) {
            rb_raise(rb_eThreadError,
                     "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
                     RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
        }
        else {
            rb_raise(rb_eThreadError, "already initialized thread");
        }
    }
    else {
        struct thread_create_params params = {
            .type = thread_invoke_type_proc,
            .args = args,
            .proc = rb_block_proc(),
        };
        return thread_create_core(thread, &params);
    }
}

VALUE
rb_thread_create(VALUE (*fn)(void *), void *arg)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_func,
        .fn = fn,
        .args = (VALUE)arg,
    };
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}

VALUE
rb_thread_create_ractor(rb_ractor_t *r, VALUE args, VALUE proc)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_ractor_proc,
        .g = r,
        .args = args,
        .proc = proc,
    };
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}

struct join_arg {
    struct rb_waiting_list *waiter;
    rb_thread_t *target;
    VALUE timeout;
    rb_hrtime_t *limit;
};

static VALUE
remove_from_join_list(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_thread = p->target;

    if (target_thread->status != THREAD_KILLED) {
        struct rb_waiting_list **join_list = &target_thread->join_list;

        while (*join_list) {
            if (*join_list == p->waiter) {
                *join_list = (*join_list)->next;
                break;
            }

            join_list = &(*join_list)->next;
        }
    }

    return Qnil;
}

static int
thread_finished(rb_thread_t *th)
{
    return th->status == THREAD_KILLED || !UNDEF_P(th->value);
}

static VALUE
thread_join_sleep(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiter->thread;
    rb_hrtime_t end = 0, *limit = p->limit;

    if (limit) {
        end = rb_hrtime_add(*limit, rb_hrtime_now());
    }

    while (!thread_finished(target_th)) {
        VALUE scheduler = rb_fiber_scheduler_current();

        if (scheduler != Qnil) {
            rb_fiber_scheduler_block(scheduler, target_th->self, p->timeout);
            // Check if the target thread is finished after blocking:
            if (thread_finished(target_th)) break;
            // Otherwise, a timeout occurred:
            else return Qfalse;
        }
        else if (!limit) {
            sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
        }
        else {
            if (hrtime_update_expire(limit, end)) {
                RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
                return Qfalse;
            }
            th->status = THREAD_STOPPED;
            native_sleep(th, limit);
        }
        RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
        th->status = THREAD_RUNNABLE;

        RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
    }

    return Qtrue;
}

static VALUE
thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = ec->thread_ptr;
    rb_fiber_t *fiber = ec->fiber_ptr;

    if (th == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be current thread");
    }

    if (th->ractor->threads.main == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be main thread");
    }

    RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

    if (target_th->status != THREAD_KILLED) {
        struct rb_waiting_list waiter;
        waiter.next = target_th->join_list;
        waiter.thread = th;
        waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
        target_th->join_list = &waiter;

        struct join_arg arg;
        arg.waiter = &waiter;
        arg.target = target_th;
        arg.timeout = timeout;
        arg.limit = limit;

        if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
            return Qnil;
        }
    }

    RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

    if (target_th->ec->errinfo != Qnil) {
        VALUE err = target_th->ec->errinfo;

        if (FIXNUM_P(err)) {
            switch (err) {
              case INT2FIX(TAG_FATAL):
                RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

                /* OK. killed. */
                break;
              default:
                rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
            }
        }
        else if (THROW_DATA_P(target_th->ec->errinfo)) {
            rb_bug("thread_join: THROW_DATA should not reach here.");
        }
        else {
            /* normal exception */
            rb_exc_raise(err);
        }
    }
    return target_th->self;
}

/*
 *  call-seq:
 *     thr.join          -> thr
 *     thr.join(limit)   -> thr
 *
 *  The calling thread will suspend execution and run this +thr+.
 *
 *  Does not return until +thr+ exits or until the given +limit+ seconds have
 *  passed.
 *
 *  If the time limit expires, +nil+ will be returned, otherwise +thr+ is
 *  returned.
 *
 *  Any threads not joined will be killed when the main program exits.
 *
 *  If +thr+ had previously raised an exception and the ::abort_on_exception or
 *  $DEBUG flags are not set (so the exception has not yet been processed), it
 *  will be processed at this time.
 *
 *     a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
 *     x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
 *     x.join # Let thread x finish, thread a will be killed on exit.
 *     #=> "axyz"
 *
 *  The following example illustrates the +limit+ parameter.
 *
 *     y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
 *     puts "Waiting" until y.join(0.15)
 *
 *  This will produce:
 *
 *     tick...
 *     Waiting
 *     tick...
 *     Waiting
 *     tick...
 *     tick...
 */

static VALUE
thread_join_m(int argc, VALUE *argv, VALUE self)
{
    VALUE timeout = Qnil;
    rb_hrtime_t rel = 0, *limit = 0;

    if (rb_check_arity(argc, 0, 1)) {
        timeout = argv[0];
    }

    // Convert the timeout eagerly, so it's always converted and deterministic
    /*
     * This supports INFINITY and negative values, so we can't use
     * rb_time_interval right now...
     */
    if (NIL_P(timeout)) {
        /* unlimited */
    }
    else if (FIXNUM_P(timeout)) {
        rel = rb_sec2hrtime(NUM2TIMET(timeout));
        limit = &rel;
    }
    else {
        limit = double2hrtime(&rel, rb_num2dbl(timeout));
    }

    return thread_join(rb_thread_ptr(self), timeout, limit);
}

/*
 *  call-seq:
 *     thr.value   -> obj
 *
 *  Waits for +thr+ to complete, using #join, and returns its value or raises
 *  the exception which terminated the thread.
 *
 *     a = Thread.new { 2 + 2 }
 *     a.value   #=> 4
 *
 *     b = Thread.new { raise 'something went wrong' }
 *     b.value   #=> RuntimeError: something went wrong
 */

static VALUE
thread_value(VALUE self)
{
    rb_thread_t *th = rb_thread_ptr(self);
    thread_join(th, Qnil, 0);
    if (UNDEF_P(th->value)) {
        // If the thread is dead because we forked, th->value is still Qundef.
        return Qnil;
    }
    return th->value;
}

/*
 * Thread Scheduling
 */

static void
getclockofday(struct timespec *ts)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
        return;
#endif
    rb_timespec_now(ts);
}

/*
 * Don't inline this, since the library call is already time consuming
 * and we don't want "struct timespec" on the stack too long for GC
 */
NOINLINE(rb_hrtime_t rb_hrtime_now(void));
rb_hrtime_t
rb_hrtime_now(void)
{
    struct timespec ts;

    getclockofday(&ts);
    return rb_timespec2hrtime(&ts);
}

/*
 * At least gcc 7.2 and 7.3 complain about "rb_hrtime_t end"
 * being uninitialized; maybe other versions, too.
 */
COMPILER_WARNING_PUSH
#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
#endif
#ifndef PRIu64
#define PRIu64 PRI_64_PREFIX "u"
#endif
/*
 * @end is the absolute time when @ts is set to expire.
 * Returns true if @end has passed;
 * updates @ts and returns false otherwise.
 */
static int
hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
{
    rb_hrtime_t now = rb_hrtime_now();

    if (now > end) return 1;

    RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);

    *timeout = end - now;
    return 0;
}
COMPILER_WARNING_POP

static int
sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    int woke;
    rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
        if (hrtime_update_expire(&rel, end))
            break;
        woke = 1;
    }
    th->status = prev_status;
    return woke;
}

static int
sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    int woke;
    rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
        if (hrtime_update_expire(&rel, end))
            break;
        woke = 1;
    }
    th->status = prev_status;
    return woke;
}

static void
sleep_forever(rb_thread_t *th, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    enum rb_thread_status status;
    int woke;

    status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
    th->status = status;

    if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);

    while (th->status == status) {
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_inc(th->ractor);
            rb_check_deadlock(th->ractor);
        }
        {
            native_sleep(th, 0);
        }
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_dec(th->ractor);
        }
        if (fl & SLEEP_ALLOW_SPURIOUS) {
            break;
        }

        woke = vm_check_ints_blocking(th->ec);

        if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
            break;
        }
    }
    th->status = prev_status;
}

void
rb_thread_sleep_forever(void)
{
    RUBY_DEBUG_LOG("forever");
    sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
}

void
rb_thread_sleep_deadly(void)
{
    RUBY_DEBUG_LOG("deadly");
    sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
}

static void
rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
{
    VALUE scheduler = rb_fiber_scheduler_current();
    if (scheduler != Qnil) {
        rb_fiber_scheduler_block(scheduler, blocker, timeout);
    }
    else {
        RUBY_DEBUG_LOG("...");
        if (end) {
            sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
        }
        else {
            sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
        }
    }
}

void
rb_thread_wait_for(struct timeval time)
{
    rb_thread_t *th = GET_THREAD();

    sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
}

void
rb_ec_check_ints(rb_execution_context_t *ec)
{
    RUBY_VM_CHECK_INTS_BLOCKING(ec);
}

/*
 * CAUTION: This function causes thread switching.
 * rb_thread_check_ints() checks ruby's interrupts.
 * Some interrupts need thread switching, invoking handlers,
 * and so on.
 */

void
rb_thread_check_ints(void)
{
    rb_ec_check_ints(GET_EC());
}

/*
 * Hidden API for the tcl/tk wrapper.
 * There is no guarantee that it will be perpetuated.
 */
int
rb_thread_check_trap_pending(void)
{
    return rb_signal_buff_size() != 0;
}

/* This function can be called in a blocking region. */
int
rb_thread_interrupted(VALUE thval)
{
    return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
}

void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}

static void
rb_thread_schedule_limits(uint32_t limits_us)
{
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();
        RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);

        if (th->running_time_us >= limits_us) {
            RUBY_DEBUG_LOG("switch %s", "start");

            RB_VM_SAVE_MACHINE_CONTEXT(th);
            thread_sched_yield(TH_SCHED(th), th);
            rb_ractor_thread_switch(th->ractor, th);

            RUBY_DEBUG_LOG("switch %s", "done");
        }
    }
}

void
rb_thread_schedule(void)
{
    rb_thread_schedule_limits(0);
    RUBY_VM_CHECK_INTS(GET_EC());
}

/* blocking region */

static inline int
blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                      rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
{
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif
    VM_ASSERT(th == GET_THREAD());

    region->prev_status = th->status;
    if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
        th->blocking_region_buffer = region;
        th->status = THREAD_STOPPED;
        rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);

        RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);
        return TRUE;
    }
    else {
        return FALSE;
    }
}

static inline void
blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
{
    /* entry to ubf_list still permitted at this point, make it impossible: */
    unblock_function_clear(th);
    /* entry to ubf_list impossible at this point, so unregister is safe: */
    unregister_ubf_list(th);

    thread_sched_to_running(TH_SCHED(th), th);
    rb_ractor_thread_switch(th->ractor, th);

    th->blocking_region_buffer = 0;
    rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    }

    RUBY_DEBUG_LOG("end");

#ifndef _WIN32
    // GET_THREAD() clears WSAGetLastError()
    VM_ASSERT(th == GET_THREAD());
#endif
}

void *
rb_nogvl(void *(*func)(void *), void *data1,
         rb_unblock_function_t *ubf, void *data2,
         int flags)
{
    if (flags & RB_NOGVL_OFFLOAD_SAFE) {
        VALUE scheduler = rb_fiber_scheduler_current();
        if (scheduler != Qnil) {
            struct rb_fiber_scheduler_blocking_operation_state state = {0};

            VALUE result = rb_fiber_scheduler_blocking_operation_wait(scheduler, func, data1, ubf, data2, flags, &state);

            if (!UNDEF_P(result)) {
                rb_errno_set(state.saved_errno);
                return state.result;
            }
        }
    }

    void *val = 0;
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_ec_thread_ptr(ec);
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    bool is_main_thread = vm->ractor.main_thread == th;
    int saved_errno = 0;

    if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
        ubf = ubf_select;
        data2 = th;
    }
    else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
        if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
            vm->ubf_async_safe = 1;
        }
    }

    rb_vm_t *volatile saved_vm = vm;
    BLOCKING_REGION(th, {
        val = func(data1);
        saved_errno = rb_errno();
    }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
    vm = saved_vm;

    if (is_main_thread) vm->ubf_async_safe = 0;

    if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
        RUBY_VM_CHECK_INTS_BLOCKING(ec);
    }

    rb_errno_set(saved_errno);

    return val;
}

/*
 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
 *                               without interrupt processing.
 *
 * rb_thread_call_without_gvl() does:
 *   (1) Check interrupts.
 *   (2) release GVL.
 *       Other Ruby threads may run in parallel.
 *   (3) call func with data1
 *   (4) acquire GVL.
 *       Other Ruby threads can not run in parallel any more.
 *   (5) Check interrupts.
 *
 * rb_thread_call_without_gvl2() does:
 *   (1) Check interrupt and return if interrupted.
 *   (2) release GVL.
 *   (3) call func with data1 and a pointer to the flags.
 *   (4) acquire GVL.
 *
 * If another thread interrupts this thread (Thread#kill, signal delivery,
 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
 * toggling a cancellation flag, canceling the invocation of a call inside
 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
 *
 * There are built-in ubfs and you can specify these ubfs:
 *
 * * RUBY_UBF_IO: ubf for IO operations
 * * RUBY_UBF_PROCESS: ubf for process operations
 *
 * However, we can not guarantee our built-in ubfs interrupt your `func()'
 * correctly. Be careful when using rb_thread_call_without_gvl(). If you don't
 * provide a proper ubf(), your program will not stop for Control+C or other
 * shutdown events.
 *
 * "Check interrupts" in the above list means checking asynchronous
 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
 * request, and so on) and calling the corresponding procedures
 * (such as `trap' for signals, raising an exception for Thread#raise).
 * If `func()' finished and received interrupts, you may skip interrupt
 * checking. For example, assume the following func(), which reads data
 * from a file:
 *
 *   read_func(...) {
 *                   // (a) before read
 *     read(buffer); // (b) reading
 *                   // (c) after read
 *   }
 *
 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
 * `read_func()' and interrupts are checked. However, if an interrupt occurs
 * at (c), after the *read* operation is completed, checking interrupts is
 * harmful because it causes an irrevocable side effect: the read data will
 * vanish. To avoid such a problem, the `read_func()' should be used with
 * `rb_thread_call_without_gvl2()'.
 *
 * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
 * immediately. This function does not show when the execution was interrupted.
 * For example, there are 4 possible timings: (a), (b), (c) and before calling
 * read_func(). You need to record the progress of read_func() and check
 * the progress after `rb_thread_call_without_gvl2()'. You may need to call
 * `rb_thread_check_ints()' correctly, or your program will not be able to
 * process events such as `trap' and so on.
 *
 * NOTE: You can not execute most of the Ruby C API and touch Ruby
 *       objects in `func()' and `ubf()', including raising an
 *       exception, because the current thread doesn't hold the GVL
 *       (it causes synchronization problems). If you need to
 *       call ruby functions, either use rb_thread_call_with_gvl()
 *       or read the source code of the C APIs and confirm safety by
 *       yourself.
 *
 * NOTE: In short, this API is difficult to use safely. I recommend you
 *       use other ways if you can. We lack experience using this API.
 *       Please report any problems related to it.
 *
 * NOTE: Releasing and re-acquiring the GVL may be expensive operations
 *       for a short-running `func()'. Be sure to benchmark, and use this
 *       mechanism only when `func()' consumes enough time.
 *
 * Safe C API:
 * * rb_thread_interrupted() - check interrupt flag
 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
 *   they will work without the GVL, and may acquire the GVL when GC is needed.
 */
void *
rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
                            rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
}

void *
rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
                           rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, 0);
}
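
/*
 * Usage sketch for rb_thread_call_without_gvl() (hypothetical extension
 * code; `blocking_read`, `cancel_read` and `read_ctx` are made-up names).
 * The worker runs without the GVL; the ubf only pokes the blocked call:
 *
 *     struct read_ctx { int fd; char *buf; size_t len; ssize_t ret; };
 *
 *     static void *
 *     blocking_read(void *p)
 *     {
 *         struct read_ctx *ctx = p;
 *         ctx->ret = read(ctx->fd, ctx->buf, ctx->len); // no Ruby API here
 *         return NULL;
 *     }
 *
 *     static void
 *     cancel_read(void *p)
 *     {
 *         struct read_ctx *ctx = p;
 *         shutdown(ctx->fd, SHUT_RDWR); // wake up the blocked read()
 *     }
 *
 *     // with the GVL held:
 *     rb_thread_call_without_gvl(blocking_read, &ctx, cancel_read, &ctx);
 */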

static int
waitfd_to_waiting_flag(int wfd_event)
{
    return wfd_event << 1;
}

static void
thread_io_setup_wfd(rb_thread_t *th, int fd, struct waiting_fd *wfd)
{
    wfd->fd = fd;
    wfd->th = th;
    wfd->busy = NULL;

    RB_VM_LOCK_ENTER();
    {
        ccan_list_add(&th->vm->waiting_fds, &wfd->wfd_node);
    }
    RB_VM_LOCK_LEAVE();
}

static void
thread_io_wake_pending_closer(struct waiting_fd *wfd)
{
    bool has_waiter = wfd->busy && RB_TEST(wfd->busy->wakeup_mutex);
    if (has_waiter) {
        rb_mutex_lock(wfd->busy->wakeup_mutex);
    }

    /* Needs to be protected with RB_VM_LOCK because we don't know if
       wfd is on the global list of pending FD ops or if it's on a
       struct rb_io_close_wait_list close-waiter. */
    RB_VM_LOCK_ENTER();
    ccan_list_del(&wfd->wfd_node);
    RB_VM_LOCK_LEAVE();

    if (has_waiter) {
        rb_thread_t *th = rb_thread_ptr(wfd->busy->closing_thread);
        if (th->scheduler != Qnil) {
            rb_fiber_scheduler_unblock(th->scheduler, wfd->busy->closing_thread, wfd->busy->closing_fiber);
        }
        else {
            rb_thread_wakeup(wfd->busy->closing_thread);
        }
        rb_mutex_unlock(wfd->busy->wakeup_mutex);
    }
}

static bool
thread_io_mn_schedulable(rb_thread_t *th, int events, const struct timeval *timeout)
{
#if defined(USE_MN_THREADS) && USE_MN_THREADS
    return !th_has_dedicated_nt(th) && (events || timeout) && th->blocking;
#else
    return false;
#endif
}

// true if need retry
static bool
thread_io_wait_events(rb_thread_t *th, int fd, int events, const struct timeval *timeout)
{
#if defined(USE_MN_THREADS) && USE_MN_THREADS
    if (thread_io_mn_schedulable(th, events, timeout)) {
        rb_hrtime_t rel, *prel;

        if (timeout) {
            rel = rb_timeval2hrtime(timeout);
            prel = &rel;
        }
        else {
            prel = NULL;
        }

        VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));

        if (thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel)) {
            // timeout
            return false;
        }
        else {
            return true;
        }
    }
#endif // defined(USE_MN_THREADS) && USE_MN_THREADS
    return false;
}

// assume read/write
static bool
blocking_call_retryable_p(int r, int eno)
{
    if (r != -1) return false;

    switch (eno) {
      case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
      case EWOULDBLOCK:
#endif
        return true;
      default:
        return false;
    }
}

bool
rb_thread_mn_schedulable(VALUE thval)
{
    rb_thread_t *th = rb_thread_ptr(thval);
    return th->mn_schedulable;
}

VALUE
rb_thread_io_blocking_call(rb_blocking_function_t *func, void *data1, int fd, int events)
{
    rb_execution_context_t *volatile ec = GET_EC();
    rb_thread_t *volatile th = rb_ec_thread_ptr(ec);

    RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), fd, events);

    struct waiting_fd waiting_fd;
    volatile VALUE val = Qundef; /* shouldn't be used */
    volatile int saved_errno = 0;
    enum ruby_tag_type state;
    bool prev_mn_schedulable = th->mn_schedulable;
    th->mn_schedulable = thread_io_mn_schedulable(th, events, NULL);

    // `errno` is only valid when there is an actual error - but we can't
    // extract that from the return value of `func` alone, so we clear any
    // prior `errno` value here so that we can later check if it was set by
    // `func` or not (as opposed to some previously set value).
    errno = 0;

    thread_io_setup_wfd(th, fd, &waiting_fd);
    {
        EC_PUSH_TAG(ec);
        if ((state = EC_EXEC_TAG()) == TAG_NONE) {
            volatile enum ruby_tag_type saved_state = state; /* for BLOCKING_REGION */
          retry:
            BLOCKING_REGION(waiting_fd.th, {
                val = func(data1);
                saved_errno = errno;
            }, ubf_select, waiting_fd.th, FALSE);

            th = rb_ec_thread_ptr(ec);
            if (events &&
                blocking_call_retryable_p((int)val, saved_errno) &&
                thread_io_wait_events(th, fd, events, NULL)) {
                RUBY_VM_CHECK_INTS_BLOCKING(ec);
                goto retry;
            }
            state = saved_state;
        }
        EC_POP_TAG();

        th = rb_ec_thread_ptr(ec);
        th->mn_schedulable = prev_mn_schedulable;
    }
    /*
     * Must be deleted before the jump;
     * this will delete either from waiting_fds or an on-stack struct rb_io_close_wait_list.
     */
    thread_io_wake_pending_closer(&waiting_fd);

    if (state) {
        EC_JUMP_TAG(ec, state);
    }
    /* TODO: check func() */
    RUBY_VM_CHECK_INTS_BLOCKING(ec);

    // If the error was a timeout, we raise a specific exception for that:
    if (saved_errno == ETIMEDOUT) {
        rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
    }

    errno = saved_errno;

    return val;
}

VALUE
rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
{
    return rb_thread_io_blocking_call(func, data1, fd, 0);
}

/*
 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
 *
 * After releasing the GVL using
 * rb_thread_call_without_gvl() you can not access Ruby values or invoke
 * methods. If you need to access Ruby, you must use this function
 * rb_thread_call_with_gvl().
 *
 * This function rb_thread_call_with_gvl() does:
 * (1) acquire GVL.
 * (2) call passed function `func'.
 * (3) release GVL.
 * (4) return the value which is returned at (2).
 *
 * NOTE: You should not return a Ruby object at (2) because such an object
 *       will not be marked.
 *
 * NOTE: If an exception is raised in `func', this function DOES NOT
 *       protect (catch) the exception. If you have any resources
 *       which should be freed before throwing an exception, you need to
 *       use rb_protect() in `func' and return a value which represents
 *       that an exception was raised.
 *
 * NOTE: This function should not be called by a thread which was not
 *       created as a Ruby thread (created by Thread.new or so). In other
 *       words, this function *DOES NOT* associate or convert a NON-Ruby
 *       thread to a Ruby thread.
 */
void *
rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
{
    rb_thread_t *th = ruby_thread_from_native();
    struct rb_blocking_region_buffer *brb;
    struct rb_unblock_callback prev_unblock;
    void *r;

    if (th == 0) {
        /* Error has occurred, but we can't use rb_bug()
         * because this thread is not Ruby's thread.
         * What should we do?
         */
        bp();
        fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
        exit(EXIT_FAILURE);
    }

    brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
    prev_unblock = th->unblock;

    if (brb == 0) {
        rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
    }

    blocking_region_end(th, brb);
    /* enter to Ruby world: You can access Ruby values, methods and so on. */
    r = (*func)(data1);
    /* leave from Ruby world: You can not access Ruby values, etc. */
    int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
    RUBY_ASSERT_ALWAYS(released);
    RB_VM_SAVE_MACHINE_CONTEXT(th);
    thread_sched_to_waiting(TH_SCHED(th), th);
    return r;
}
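
/*
 * Usage sketch for rb_thread_call_with_gvl() (hypothetical extension code;
 * `worker`, `make_result` and `do_blocking_work` are made-up names):
 * calling back into Ruby from inside a rb_thread_call_without_gvl() worker.
 *
 *     static void *
 *     make_result(void *p)
 *     {
 *         // The GVL is held here; the Ruby API is safe to use.
 *         return (void *)rb_str_new_cstr((const char *)p);
 *     }
 *
 *     static void *
 *     worker(void *p)   // runs without the GVL
 *     {
 *         char *data = do_blocking_work(p);            // no Ruby API
 *         return rb_thread_call_with_gvl(make_result, data);
 *     }
 */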
1939
1940/*
1941 * ruby_thread_has_gvl_p - check if current native thread has GVL.
1942 *
1943 ***
1944 *** This API is EXPERIMENTAL!
1945 *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1946 ***
1947 */
1948
1949int
1950ruby_thread_has_gvl_p(void)
1951{
1952 rb_thread_t *th = ruby_thread_from_native();
1953
1954 if (th && th->blocking_region_buffer == 0) {
1955 return 1;
1956 }
1957 else {
1958 return 0;
1959 }
1960}
1961
1962/*
1963 * call-seq:
1964 * Thread.pass -> nil
1965 *
1966 * Give the thread scheduler a hint to pass execution to another thread.
1967 * A running thread may or may not switch, it depends on OS and processor.
1968 */
1969
1970static VALUE
1971thread_s_pass(VALUE klass)
1972{
1974 return Qnil;
1975}
1976
1977/*****************************************************/
1978
1979/*
1980 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1981 *
1982 * Async events such as an exception thrown by Thread#raise,
1983 * Thread#kill and thread termination (after main thread termination)
1984 * will be queued to th->pending_interrupt_queue.
1985 * - clear: clear the queue.
1986 * - enque: enqueue err object into queue.
1987 * - deque: dequeue err object from queue.
1988 * - active_p: return 1 if the queue should be checked.
1989 *
1990 * All rb_threadptr_pending_interrupt_* functions are called by
1991 * a thread which has acquired the GVL, of course.
1992 * Note that all "rb_"-prefixed APIs require the GVL to call.
1993 */
1994
1995void
1996rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
1997{
1998 rb_ary_clear(th->pending_interrupt_queue);
1999}
2000
2001void
2002rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
2003{
2004 rb_ary_push(th->pending_interrupt_queue, v);
2005 th->pending_interrupt_queue_checked = 0;
2006}
2007
2008static void
2009threadptr_check_pending_interrupt_queue(rb_thread_t *th)
2010{
2011 if (!th->pending_interrupt_queue) {
2012 rb_raise(rb_eThreadError, "uninitialized thread");
2013 }
2014}
2015
2016enum handle_interrupt_timing {
2017 INTERRUPT_NONE,
2018 INTERRUPT_IMMEDIATE,
2019 INTERRUPT_ON_BLOCKING,
2020 INTERRUPT_NEVER
2021};
2022
2023static enum handle_interrupt_timing
2024rb_threadptr_pending_interrupt_from_symbol(rb_thread_t *th, VALUE sym)
2025{
2026 if (sym == sym_immediate) {
2027 return INTERRUPT_IMMEDIATE;
2028 }
2029 else if (sym == sym_on_blocking) {
2030 return INTERRUPT_ON_BLOCKING;
2031 }
2032 else if (sym == sym_never) {
2033 return INTERRUPT_NEVER;
2034 }
2035 else {
2036 rb_raise(rb_eThreadError, "unknown mask signature");
2037 }
2038}
2039
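/* Scans pending_interrupt_mask_stack from the innermost (most recently
 * pushed) mask outwards and returns the timing configured for the exception
 * class `err' or one of its ancestors; INTERRUPT_NONE if no mask applies. */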
2040static enum handle_interrupt_timing
2041rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
2042{
2043 VALUE mask;
2044 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
2045 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
2046 VALUE mod;
2047 long i;
2048
2049 for (i=0; i<mask_stack_len; i++) {
2050 mask = mask_stack[mask_stack_len-(i+1)];
2051
2052 if (SYMBOL_P(mask)) {
2053 /* do not match RUBY_FATAL_THREAD_KILLED etc */
2054 if (err != rb_cInteger) {
2055 return rb_threadptr_pending_interrupt_from_symbol(th, mask);
2056 }
2057 else {
2058 continue;
2059 }
2060 }
2061
2062 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
2063 VALUE klass = mod;
2064 VALUE sym;
2065
2066 if (BUILTIN_TYPE(mod) == T_ICLASS) {
2067 klass = RBASIC(mod)->klass;
2068 }
2069 else if (mod != RCLASS_ORIGIN(mod)) {
2070 continue;
2071 }
2072
2073 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
2074 return rb_threadptr_pending_interrupt_from_symbol(th, sym);
2075 }
2076 }
2077 /* try the next mask */
2078 }
2079 return INTERRUPT_NONE;
2080}
2081
2082static int
2083rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
2084{
2085 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2086}
2087
2088static int
2089rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2090{
2091 int i;
2092 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2093 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2094 if (rb_obj_is_kind_of(e, err)) {
2095 return TRUE;
2096 }
2097 }
2098 return FALSE;
2099}
2100
2101static VALUE
2102rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2103{
2104#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2105 int i;
2106
2107 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2108 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2109
2110 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2111
2112 switch (mask_timing) {
2113 case INTERRUPT_ON_BLOCKING:
2114 if (timing != INTERRUPT_ON_BLOCKING) {
2115 break;
2116 }
2117 /* fall through */
2118 case INTERRUPT_NONE: /* default: IMMEDIATE */
2119 case INTERRUPT_IMMEDIATE:
2120 rb_ary_delete_at(th->pending_interrupt_queue, i);
2121 return err;
2122 case INTERRUPT_NEVER:
2123 break;
2124 }
2125 }
2126
2127 th->pending_interrupt_queue_checked = 1;
2128 return Qundef;
2129#else
2130 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2131 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2132 th->pending_interrupt_queue_checked = 1;
2133 }
2134 return err;
2135#endif
2136}
2137
2138static int
2139threadptr_pending_interrupt_active_p(rb_thread_t *th)
2140{
2141 /*
2142 * As an optimization, we don't check the async errinfo queue
2143 * if neither the queue nor the thread interrupt mask has changed
2144 * since the last check.
2145 */
2146 if (th->pending_interrupt_queue_checked) {
2147 return 0;
2148 }
2149
2150 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2151 return 0;
2152 }
2153
2154 return 1;
2155}
2156
2157static int
2158handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2159{
2160 VALUE *maskp = (VALUE *)args;
2161
2162 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2163 rb_raise(rb_eArgError, "unknown mask signature");
2164 }
2165
2166 if (key == rb_eException && (UNDEF_P(*maskp) || NIL_P(*maskp))) {
2167 *maskp = val;
2168 return ST_CONTINUE;
2169 }
2170
2171 if (RTEST(*maskp)) {
2172 if (!RB_TYPE_P(*maskp, T_HASH)) {
2173 VALUE prev = *maskp;
2174 *maskp = rb_ident_hash_new();
2175 if (SYMBOL_P(prev)) {
2176 rb_hash_aset(*maskp, rb_eException, prev);
2177 }
2178 }
2179 rb_hash_aset(*maskp, key, val);
2180 }
2181 else {
2182 *maskp = Qfalse;
2183 }
2184
2185 return ST_CONTINUE;
2186}
2187
2188/*
2189 * call-seq:
2190 * Thread.handle_interrupt(hash) { ... } -> result of the block
2191 *
2192 * Changes asynchronous interrupt timing.
2193 *
2194 * _interrupt_ means an asynchronous event and its corresponding procedure,
2195 * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
2196 * and main thread termination (if the main thread terminates, then all
2197 * other threads will be killed).
2198 *
2199 * The given +hash+ has pairs like <code>ExceptionClass =>
2200 * :TimingSymbol</code>, where the ExceptionClass is the interrupt handled by
2201 * the given block. The TimingSymbol can be one of the following symbols:
2202 *
2203 * [+:immediate+] Invoke interrupts immediately.
2204 * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
2205 * [+:never+] Never invoke interrupts.
2206 *
2207 * _BlockingOperation_ means an operation that will block the calling thread,
2208 * such as read and write. On the CRuby implementation, _BlockingOperation_ is any
2209 * operation executed without the GVL.
2210 *
2211 * Masked asynchronous interrupts are delayed until they are enabled.
2212 * This method is similar to sigprocmask(3).
2213 *
2214 * === NOTE
2215 *
2216 * Asynchronous interrupts are difficult to use.
2217 *
2218 * If you need to communicate between threads, please consider another way, such as Queue.
2219 *
2220 * Or use them only with a deep understanding of this method.
2221 *
2222 * === Usage
2223 *
2224 * In this example, we can guard against Thread#raise exceptions.
2225 *
2226 * Using the +:never+ TimingSymbol, the RuntimeError exception will always be
2227 * ignored in the first block of the main thread. In the second
2228 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2229 *
2230 * th = Thread.new do
2231 * Thread.handle_interrupt(RuntimeError => :never) {
2232 * begin
2233 * # You can write resource allocation code safely.
2234 * Thread.handle_interrupt(RuntimeError => :immediate) {
2235 * # ...
2236 * }
2237 * ensure
2238 * # You can write resource deallocation code safely.
2239 * end
2240 * }
2241 * end
2242 * Thread.pass
2243 * # ...
2244 * th.raise "stop"
2245 *
2246 * While we are ignoring the RuntimeError exception, it's safe to write our
2247 * resource allocation code. Then, the ensure block is where we can safely
2248 * deallocate our resources.
2249 *
2250 * ==== Stack control settings
2251 *
2252 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2253 * to control more than one ExceptionClass and TimingSymbol at a time.
2254 *
2255 * Thread.handle_interrupt(FooError => :never) {
2256 * Thread.handle_interrupt(BarError => :never) {
2257 * # FooError and BarError are prohibited.
2258 * }
2259 * }
2260 *
2261 * ==== Inheritance with ExceptionClass
2262 *
2263 * All exceptions inherited from the ExceptionClass parameter will be considered.
2264 *
2265 * Thread.handle_interrupt(Exception => :never) {
2266 * # all exceptions inherited from Exception are prohibited.
2267 * }
2268 *
2269 * For handling all interrupts, use +Object+ and not +Exception+
2270 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2271 */
2272static VALUE
2273rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2274{
2275 VALUE mask = Qundef;
2276 rb_execution_context_t * volatile ec = GET_EC();
2277 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2278 volatile VALUE r = Qnil;
2279 enum ruby_tag_type state;
2280
2281 if (!rb_block_given_p()) {
2282 rb_raise(rb_eArgError, "block is needed.");
2283 }
2284
2285 mask_arg = rb_to_hash_type(mask_arg);
2286
2287 if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
2288 mask = Qnil;
2289 }
2290
2291 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2292
2293 if (UNDEF_P(mask)) {
2294 return rb_yield(Qnil);
2295 }
2296
2297 if (!RTEST(mask)) {
2298 mask = mask_arg;
2299 }
2300 else if (RB_TYPE_P(mask, T_HASH)) {
2301 OBJ_FREEZE(mask);
2302 }
2303
2304 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2305 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2306 th->pending_interrupt_queue_checked = 0;
2307 RUBY_VM_SET_INTERRUPT(th->ec);
2308 }
2309
2310 EC_PUSH_TAG(th->ec);
2311 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2312 r = rb_yield(Qnil);
2313 }
2314 EC_POP_TAG();
2315
2316 rb_ary_pop(th->pending_interrupt_mask_stack);
2317 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2318 th->pending_interrupt_queue_checked = 0;
2319 RUBY_VM_SET_INTERRUPT(th->ec);
2320 }
2321
2322 RUBY_VM_CHECK_INTS(th->ec);
2323
2324 if (state) {
2325 EC_JUMP_TAG(th->ec, state);
2326 }
2327
2328 return r;
2329}
2330
2331/*
2332 * call-seq:
2333 * target_thread.pending_interrupt?(error = nil) -> true/false
2334 *
2335 * Returns whether or not the asynchronous queue is empty for the target thread.
2336 *
2337 * If +error+ is given, then check only for +error+ type deferred events.
2338 *
2339 * See ::pending_interrupt? for more information.
2340 */
2341static VALUE
2342rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2343{
2344 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2345
2346 if (!target_th->pending_interrupt_queue) {
2347 return Qfalse;
2348 }
2349 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2350 return Qfalse;
2351 }
2352 if (rb_check_arity(argc, 0, 1)) {
2353 VALUE err = argv[0];
2354 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2355 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2356 }
2357 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2358 }
2359 else {
2360 return Qtrue;
2361 }
2362}
2363
2364/*
2365 * call-seq:
2366 * Thread.pending_interrupt?(error = nil) -> true/false
2367 *
2368 * Returns whether or not the asynchronous queue is empty.
2369 *
2370 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2371 * this method can be used to determine if there are any deferred events.
2372 *
2373 * If this method returns true, then you may finish the +:never+ block.
2374 *
2375 * For example, the following method processes deferred asynchronous events
2376 * immediately.
2377 *
2378 * def Thread.kick_interrupt_immediately
2379 * Thread.handle_interrupt(Object => :immediate) {
2380 * Thread.pass
2381 * }
2382 * end
2383 *
2384 * If +error+ is given, then check only for +error+ type deferred events.
2385 *
2386 * === Usage
2387 *
2388 * th = Thread.new{
2389 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2390 * while true
2391 * ...
2392 * # reach safe point to invoke interrupt
2393 * if Thread.pending_interrupt?
2394 * Thread.handle_interrupt(Object => :immediate){}
2395 * end
2396 * ...
2397 * end
2398 * }
2399 * }
2400 * ...
2401 * th.raise # stop thread
2402 *
2403 * This example can also be written as the following, which you should use to
2404 * avoid asynchronous interrupts.
2405 *
2406 * flag = true
2407 * th = Thread.new{
2408 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2409 * while true
2410 * ...
2411 * # reach safe point to invoke interrupt
2412 * break if flag == false
2413 * ...
2414 * end
2415 * }
2416 * }
2417 * ...
2418 * flag = false # stop thread
2419 */
2420
2421static VALUE
2422rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2423{
2424 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2425}
2426
2427NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2428
2429static void
2430rb_threadptr_to_kill(rb_thread_t *th)
2431{
2432 rb_threadptr_pending_interrupt_clear(th);
2433 th->status = THREAD_RUNNABLE;
2434 th->to_kill = 1;
2435 th->ec->errinfo = INT2FIX(TAG_FATAL);
2436 EC_JUMP_TAG(th->ec, TAG_FATAL);
2437}
2438
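/* Atomically fetch and clear the interrupt bits that are not blocked by
 * ec->interrupt_mask, leaving the masked bits set in ec->interrupt_flag.
 * The CAS loop retries if another thread updates the flag concurrently. */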
2439static inline rb_atomic_t
2440threadptr_get_interrupts(rb_thread_t *th)
2441{
2442 rb_execution_context_t *ec = th->ec;
2443 rb_atomic_t interrupt;
2444 rb_atomic_t old;
2445
2446 do {
2447 interrupt = ec->interrupt_flag;
2448 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2449 } while (old != interrupt);
2450 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2451}
2452
2453static void threadptr_interrupt_exec_exec(rb_thread_t *th);
2454
2455int
2456rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2457{
2458 rb_atomic_t interrupt;
2459 int postponed_job_interrupt = 0;
2460 int ret = FALSE;
2461
2462 if (th->ec->raised_flag) return ret;
2463
2464 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2465 int sig;
2466 int timer_interrupt;
2467 int pending_interrupt;
2468 int trap_interrupt;
2469 int terminate_interrupt;
2470
2471 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2472 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2473 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2474 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2475 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2476
2477 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2478 RB_VM_LOCK_ENTER();
2479 RB_VM_LOCK_LEAVE();
2480 }
2481
2482 if (postponed_job_interrupt) {
2483 rb_postponed_job_flush(th->vm);
2484 }
2485
2486 if (trap_interrupt) {
2487 /* signal handling */
2488 if (th == th->vm->ractor.main_thread) {
2489 enum rb_thread_status prev_status = th->status;
2490
2491 th->status = THREAD_RUNNABLE;
2492 {
2493 while ((sig = rb_get_next_signal()) != 0) {
2494 ret |= rb_signal_exec(th, sig);
2495 }
2496 }
2497 th->status = prev_status;
2498 }
2499
2500 if (!ccan_list_empty(&th->interrupt_exec_tasks)) {
2501 enum rb_thread_status prev_status = th->status;
2502
2503 th->status = THREAD_RUNNABLE;
2504 {
2505 threadptr_interrupt_exec_exec(th);
2506 }
2507 th->status = prev_status;
2508 }
2509 }
2510
2511 /* exception from another thread */
2512 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2513 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2514 RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);
2515 ret = TRUE;
2516
2517 if (UNDEF_P(err)) {
2518 /* no error */
2519 }
2520 else if (err == RUBY_FATAL_THREAD_KILLED /* Thread#kill received */ ||
2521 err == RUBY_FATAL_THREAD_TERMINATED /* Terminate thread */ ||
2522 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2523 terminate_interrupt = 1;
2524 }
2525 else {
2526 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2527 /* the only special exception to be queued across threads */
2528 err = ruby_vm_special_exception_copy(err);
2529 }
2530 /* set runnable if th was sleeping. */
2531 if (th->status == THREAD_STOPPED ||
2532 th->status == THREAD_STOPPED_FOREVER)
2533 th->status = THREAD_RUNNABLE;
2534 rb_exc_raise(err);
2535 }
2536 }
2537
2538 if (terminate_interrupt) {
2539 rb_threadptr_to_kill(th);
2540 }
2541
2542 if (timer_interrupt) {
2543 uint32_t limits_us = thread_default_quantum_ms * 1000;
2544
2545 if (th->priority > 0)
2546 limits_us <<= th->priority;
2547 else
2548 limits_us >>= -th->priority;
2549
2550 if (th->status == THREAD_RUNNABLE)
2551 th->running_time_us += 10 * 1000; // 10ms = 10_000us // TODO: use macro
2552
2553 VM_ASSERT(th->ec->cfp);
2554 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2555 0, 0, 0, Qundef);
2556
2557 rb_thread_schedule_limits(limits_us);
2558 }
2559 }
2560 return ret;
2561}
2562
2563void
2564rb_thread_execute_interrupts(VALUE thval)
2565{
2566 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2567}
2568
2569static void
2570rb_threadptr_ready(rb_thread_t *th)
2571{
2572 rb_threadptr_interrupt(th);
2573}
2574
2575static VALUE
2576rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2577{
2578 VALUE exc;
2579
2580 if (rb_threadptr_dead(target_th)) {
2581 return Qnil;
2582 }
2583
2584 if (argc == 0) {
2585 exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2586 }
2587 else {
2588 exc = rb_make_exception(argc, argv);
2589 }
2590
2591 /* making an exception object can switch threads,
2592 so we need to check thread deadness again */
2593 if (rb_threadptr_dead(target_th)) {
2594 return Qnil;
2595 }
2596
2597 rb_ec_setup_exception(GET_EC(), exc, Qundef);
2598 rb_threadptr_pending_interrupt_enque(target_th, exc);
2599 rb_threadptr_interrupt(target_th);
2600 return Qnil;
2601}
2602
2603void
2604rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2605{
2606 VALUE argv[2];
2607
2608 argv[0] = rb_eSignal;
2609 argv[1] = INT2FIX(sig);
2610 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2611}
2612
2613void
2614rb_threadptr_signal_exit(rb_thread_t *th)
2615{
2616 VALUE argv[2];
2617
2618 argv[0] = rb_eSystemExit;
2619 argv[1] = rb_str_new2("exit");
2620
2621 // TODO: check signal raise delivery
2622 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2623}
2624
2625int
2626rb_ec_set_raised(rb_execution_context_t *ec)
2627{
2628 if (ec->raised_flag & RAISED_EXCEPTION) {
2629 return 1;
2630 }
2631 ec->raised_flag |= RAISED_EXCEPTION;
2632 return 0;
2633}
2634
2635int
2636rb_ec_reset_raised(rb_execution_context_t *ec)
2637{
2638 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2639 return 0;
2640 }
2641 ec->raised_flag &= ~RAISED_EXCEPTION;
2642 return 1;
2643}
2644
2645int
2646rb_notify_fd_close(int fd, struct rb_io_close_wait_list *busy)
2647{
2648 rb_vm_t *vm = GET_THREAD()->vm;
2649 struct waiting_fd *wfd = 0, *next;
2650 ccan_list_head_init(&busy->pending_fd_users);
2651 int has_any;
2652 VALUE wakeup_mutex;
2653
2654 RB_VM_LOCK_ENTER();
2655 {
2656 ccan_list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2657 if (wfd->fd == fd) {
2658 rb_thread_t *th = wfd->th;
2659 VALUE err;
2660
2661 ccan_list_del(&wfd->wfd_node);
2662 ccan_list_add(&busy->pending_fd_users, &wfd->wfd_node);
2663
2664 wfd->busy = busy;
2665 err = th->vm->special_exceptions[ruby_error_stream_closed];
2666 rb_threadptr_pending_interrupt_enque(th, err);
2667 rb_threadptr_interrupt(th);
2668 }
2669 }
2670 }
2671
2672 has_any = !ccan_list_empty(&busy->pending_fd_users);
2673 busy->closing_thread = rb_thread_current();
2674 busy->closing_fiber = rb_fiber_current();
2675 wakeup_mutex = Qnil;
2676 if (has_any) {
2677 wakeup_mutex = rb_mutex_new();
2678 RBASIC_CLEAR_CLASS(wakeup_mutex); /* hide from ObjectSpace */
2679 }
2680 busy->wakeup_mutex = wakeup_mutex;
2681
2682 RB_VM_LOCK_LEAVE();
2683
2684 /* If the caller didn't pass *busy as a pointer to something on the stack,
2685 we need to guard this mutex object on _our_ C stack for the duration
2686 of this function. */
2687 RB_GC_GUARD(wakeup_mutex);
2688 return has_any;
2689}
2690
2691void
2692rb_notify_fd_close_wait(struct rb_io_close_wait_list *busy)
2693{
2694 if (!RB_TEST(busy->wakeup_mutex)) {
2695 /* There was nobody else using this file when we closed it, so we
2696 never bothered to allocate a mutex */
2697 return;
2698 }
2699
2700 rb_mutex_lock(busy->wakeup_mutex);
2701 while (!ccan_list_empty(&busy->pending_fd_users)) {
2702 rb_mutex_sleep(busy->wakeup_mutex, Qnil);
2703 }
2704 rb_mutex_unlock(busy->wakeup_mutex);
2705}
2706
2707void
2708rb_thread_fd_close(int fd)
2709{
2710 struct rb_io_close_wait_list busy;
2711
2712 if (rb_notify_fd_close(fd, &busy)) {
2713 rb_notify_fd_close_wait(&busy);
2714 }
2715}
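
/*
 * At the Ruby level, the notify/wait protocol above is what makes closing
 * an IO wake up threads blocked on it (editorial sketch, not part of the
 * original source):
 *
 *   r, w = IO.pipe
 *   t = Thread.new { r.read }             # blocks on the pipe
 *   sleep 0.1 while t.status != "sleep"
 *   r.close                               # interrupts the blocked reader
 *   begin
 *     t.join
 *   rescue IOError => e
 *     e.message  # => "stream closed in another thread"
 *   end
 */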
2716
2717/*
2718 * call-seq:
2719 * thr.raise
2720 * thr.raise(string)
2721 * thr.raise(exception [, string [, array]])
2722 *
2723 * Raises an exception from the given thread. The caller does not have to be
2724 * +thr+. See Kernel#raise for more information.
2725 *
2726 * Thread.abort_on_exception = true
2727 * a = Thread.new { sleep(200) }
2728 * a.raise("Gotcha")
2729 *
2730 * This will produce:
2731 *
2732 * prog.rb:3: Gotcha (RuntimeError)
2733 * from prog.rb:2:in `initialize'
2734 * from prog.rb:2:in `new'
2735 * from prog.rb:2
2736 */
2737
2738static VALUE
2739thread_raise_m(int argc, VALUE *argv, VALUE self)
2740{
2741 rb_thread_t *target_th = rb_thread_ptr(self);
2742 const rb_thread_t *current_th = GET_THREAD();
2743
2744 threadptr_check_pending_interrupt_queue(target_th);
2745 rb_threadptr_raise(target_th, argc, argv);
2746
2747 /* To perform Thread.current.raise as Kernel.raise */
2748 if (current_th == target_th) {
2749 RUBY_VM_CHECK_INTS(target_th->ec);
2750 }
2751 return Qnil;
2752}
2753
2754
2755/*
2756 * call-seq:
2757 * thr.exit -> thr
2758 * thr.kill -> thr
2759 * thr.terminate -> thr
2760 *
2761 * Terminates +thr+ and schedules another thread to be run, returning
2762 * the terminated Thread. If this is the main thread, or the last
2763 * thread, exits the process.
2764 */
2765
2766 VALUE
2767rb_thread_kill(VALUE thread)
2768{
2769 rb_thread_t *target_th = rb_thread_ptr(thread);
2770
2771 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2772 return thread;
2773 }
2774 if (target_th == target_th->vm->ractor.main_thread) {
2775 rb_exit(EXIT_SUCCESS);
2776 }
2777
2778 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2779
2780 if (target_th == GET_THREAD()) {
2781 /* kill myself immediately */
2782 rb_threadptr_to_kill(target_th);
2783 }
2784 else {
2785 threadptr_check_pending_interrupt_queue(target_th);
2786 rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2787 rb_threadptr_interrupt(target_th);
2788 }
2789
2790 return thread;
2791}
2792
2793int
2794rb_thread_to_be_killed(VALUE thread)
2795{
2796 rb_thread_t *target_th = rb_thread_ptr(thread);
2797
2798 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2799 return TRUE;
2800 }
2801 return FALSE;
2802}
2803
2804/*
2805 * call-seq:
2806 * Thread.kill(thread) -> thread
2807 *
2808 * Causes the given +thread+ to exit; see also Thread::exit.
2809 *
2810 * count = 0
2811 * a = Thread.new { loop { count += 1 } }
2812 * sleep(0.1) #=> 0
2813 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2814 * count #=> 93947
2815 * a.alive? #=> false
2816 */
2817
2818static VALUE
2819rb_thread_s_kill(VALUE obj, VALUE th)
2820{
2821 return rb_thread_kill(th);
2822}
2823
2824
2825/*
2826 * call-seq:
2827 * Thread.exit -> thread
2828 *
2829 * Terminates the currently running thread and schedules another thread to be
2830 * run.
2831 *
2832 * If this thread is already marked to be killed, ::exit returns the Thread.
2833 *
2834 * If this is the main thread, or the last thread, exits the process.
2835 */
2836
2837static VALUE
2838rb_thread_exit(VALUE _)
2839{
2840 rb_thread_t *th = GET_THREAD();
2841 return rb_thread_kill(th->self);
2842}
2843
2844
2845/*
2846 * call-seq:
2847 * thr.wakeup -> thr
2848 *
2849 * Marks a given thread as eligible for scheduling; however, it may still
2850 * remain blocked on I/O.
2851 *
2852 * *Note:* This does not invoke the scheduler, see #run for more information.
2853 *
2854 * c = Thread.new { Thread.stop; puts "hey!" }
2855 * sleep 0.1 while c.status!='sleep'
2856 * c.wakeup
2857 * c.join
2858 * #=> "hey!"
2859 */
2860
2861 VALUE
2862rb_thread_wakeup(VALUE thread)
2863{
2864 if (!RTEST(rb_thread_wakeup_alive(thread))) {
2865 rb_raise(rb_eThreadError, "killed thread");
2866 }
2867 return thread;
2868}
2869
2870 VALUE
2871 rb_thread_wakeup_alive(VALUE thread)
2872{
2873 rb_thread_t *target_th = rb_thread_ptr(thread);
2874 if (target_th->status == THREAD_KILLED) return Qnil;
2875
2876 rb_threadptr_ready(target_th);
2877
2878 if (target_th->status == THREAD_STOPPED ||
2879 target_th->status == THREAD_STOPPED_FOREVER) {
2880 target_th->status = THREAD_RUNNABLE;
2881 }
2882
2883 return thread;
2884}
2885
2886
2887/*
2888 * call-seq:
2889 * thr.run -> thr
2890 *
2891 * Wakes up +thr+, making it eligible for scheduling.
2892 *
2893 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2894 * sleep 0.1 while a.status!='sleep'
2895 * puts "Got here"
2896 * a.run
2897 * a.join
2898 *
2899 * This will produce:
2900 *
2901 * a
2902 * Got here
2903 * c
2904 *
2905 * See also the instance method #wakeup.
2906 */
2907
2908 VALUE
2909rb_thread_run(VALUE thread)
2910{
2911 rb_thread_wakeup(thread);
2912 rb_thread_schedule();
2913 return thread;
2914}
2915
2916
2917 VALUE
2918rb_thread_stop(void)
2919{
2920 if (rb_thread_alone()) {
2921 rb_raise(rb_eThreadError,
2922 "stopping only thread\n\tnote: use sleep to stop forever");
2923 }
2924 rb_thread_sleep_deadly();
2925 return Qnil;
2926}
2927
2928/*
2929 * call-seq:
2930 * Thread.stop -> nil
2931 *
2932 * Stops execution of the current thread, putting it into a ``sleep'' state,
2933 * and schedules execution of another thread.
2934 *
2935 * a = Thread.new { print "a"; Thread.stop; print "c" }
2936 * sleep 0.1 while a.status!='sleep'
2937 * print "b"
2938 * a.run
2939 * a.join
2940 * #=> "abc"
2941 */
2942
2943static VALUE
2944thread_stop(VALUE _)
2945{
2946 return rb_thread_stop();
2947}
2948
2949/********************************************************************/
2950
2951VALUE
2952rb_thread_list(void)
2953{
2954 // TODO
2955 return rb_ractor_thread_list();
2956}
2957
2958/*
2959 * call-seq:
2960 * Thread.list -> array
2961 *
2962 * Returns an array of Thread objects for all threads that are either runnable
2963 * or stopped.
2964 *
2965 * Thread.new { sleep(200) }
2966 * Thread.new { 1000000.times {|i| i*i } }
2967 * Thread.new { Thread.stop }
2968 * Thread.list.each {|t| p t}
2969 *
2970 * This will produce:
2971 *
2972 * #<Thread:0x401b3e84 sleep>
2973 * #<Thread:0x401b3f38 run>
2974 * #<Thread:0x401b3fb0 sleep>
2975 * #<Thread:0x401bdf4c run>
2976 */
2977
2978static VALUE
2979thread_list(VALUE _)
2980{
2981 return rb_thread_list();
2982}
2983
2984 VALUE
2985 rb_thread_current(void)
2986{
2987 return GET_THREAD()->self;
2988}
2989
2990/*
2991 * call-seq:
2992 * Thread.current -> thread
2993 *
2994 * Returns the currently executing thread.
2995 *
2996 * Thread.current #=> #<Thread:0x401bdf4c run>
2997 */
2998
2999static VALUE
3000thread_s_current(VALUE klass)
3001{
3002 return rb_thread_current();
3003}
3004
3005 VALUE
3006rb_thread_main(void)
3007{
3008 return GET_RACTOR()->threads.main->self;
3009}
3010
3011/*
3012 * call-seq:
3013 * Thread.main -> thread
3014 *
3015 * Returns the main thread.
3016 */
3017
3018static VALUE
3019rb_thread_s_main(VALUE klass)
3020{
3021 return rb_thread_main();
3022}
3023
3024
3025/*
3026 * call-seq:
3027 * Thread.abort_on_exception -> true or false
3028 *
3029 * Returns the status of the global ``abort on exception'' condition.
3030 *
3031 * The default is +false+.
3032 *
3033 * When set to +true+, if any thread is aborted by an exception, the
3034 * raised exception will be re-raised in the main thread.
3035 *
3036 * Can also be specified by the global $DEBUG flag or command line option
3037 * +-d+.
3038 *
3039 * See also ::abort_on_exception=.
3040 *
3041 * There is also an instance level method to set this for a specific thread,
3042 * see #abort_on_exception.
3043 */
3044
3045static VALUE
3046rb_thread_s_abort_exc(VALUE _)
3047{
3048 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
3049}
3050
3051
3052/*
3053 * call-seq:
3054 * Thread.abort_on_exception= boolean -> true or false
3055 *
3056 * When set to +true+, if any thread is aborted by an exception, the
3057 * raised exception will be re-raised in the main thread.
3058 * Returns the new state.
3059 *
3060 * Thread.abort_on_exception = true
3061 * t1 = Thread.new do
3062 * puts "In new thread"
3063 * raise "Exception from thread"
3064 * end
3065 * sleep(1)
3066 * puts "not reached"
3067 *
3068 * This will produce:
3069 *
3070 * In new thread
3071 * prog.rb:4: Exception from thread (RuntimeError)
3072 * from prog.rb:2:in `initialize'
3073 * from prog.rb:2:in `new'
3074 * from prog.rb:2
3075 *
3076 * See also ::abort_on_exception.
3077 *
3078 * There is also an instance level method to set this for a specific thread,
3079 * see #abort_on_exception=.
3080 */
3081
3082static VALUE
3083rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3084{
3085 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3086 return val;
3087}
3088
3089
3090/*
3091 * call-seq:
3092 * thr.abort_on_exception -> true or false
3093 *
3094 * Returns the status of the thread-local ``abort on exception'' condition for
3095 * this +thr+.
3096 *
3097 * The default is +false+.
3098 *
3099 * See also #abort_on_exception=.
3100 *
3101 * There is also a class level method to set this for all threads, see
3102 * ::abort_on_exception.
3103 */
3104
3105static VALUE
3106rb_thread_abort_exc(VALUE thread)
3107{
3108 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3109}
3110
3111
3112/*
3113 * call-seq:
3114 * thr.abort_on_exception= boolean -> true or false
3115 *
3116 * When set to +true+, if this +thr+ is aborted by an exception, the
3117 * raised exception will be re-raised in the main thread.
3118 *
3119 * See also #abort_on_exception.
3120 *
3121 * There is also a class level method to set this for all threads, see
3122 * ::abort_on_exception=.
3123 */
3124
3125static VALUE
3126rb_thread_abort_exc_set(VALUE thread, VALUE val)
3127{
3128 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3129 return val;
3130}
3131
3132
3133/*
3134 * call-seq:
3135 * Thread.report_on_exception -> true or false
3136 *
3137 * Returns the status of the global ``report on exception'' condition.
3138 *
3139 * The default is +true+ since Ruby 2.5.
3140 *
3141 * All threads created when this flag is true will report
3142 * a message on $stderr if an exception kills the thread.
3143 *
3144 * Thread.new { 1.times { raise } }
3145 *
3146 * will produce this output on $stderr:
3147 *
3148 * #<Thread:...> terminated with exception (report_on_exception is true):
3149 * Traceback (most recent call last):
3150 * 2: from -e:1:in `block in <main>'
3151 * 1: from -e:1:in `times'
3152 *
3153 * This is done to catch errors in threads early.
3154 * In some cases, you might not want this output.
3155 * There are multiple ways to avoid the extra output:
3156 *
3157 * * If the exception is not intended, the best is to fix the cause of
3158 * the exception so it does not happen anymore.
3159 * * If the exception is intended, it might be better to rescue it closer to
3160 * where it is raised rather than let it kill the Thread.
3161 * * If it is guaranteed the Thread will be joined with Thread#join or
3162 * Thread#value, then it is safe to disable this report with
3163 * <code>Thread.current.report_on_exception = false</code>
3164 * when starting the Thread.
3165 * However, this might handle the exception much later, or not at all
3166 * if the Thread is never joined due to the parent thread being blocked, etc.
3167 *
3168 * See also ::report_on_exception=.
3169 *
3170 * There is also an instance level method to set this for a specific thread,
3171 * see #report_on_exception=.
3172 *
3173 */
3174
3175static VALUE
3176rb_thread_s_report_exc(VALUE _)
3177{
3178 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3179}
3180
3181
3182/*
3183 * call-seq:
3184 * Thread.report_on_exception= boolean -> true or false
3185 *
3186 * Returns the new state.
3187 * When set to +true+, all threads created afterwards will inherit the
3188 * condition and report a message on $stderr if an exception kills a thread:
3189 *
3190 * Thread.report_on_exception = true
3191 * t1 = Thread.new do
3192 * puts "In new thread"
3193 * raise "Exception from thread"
3194 * end
3195 * sleep(1)
3196 * puts "In the main thread"
3197 *
3198 * This will produce:
3199 *
3200 * In new thread
3201 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3202 * Traceback (most recent call last):
3203 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3204 * In the main thread
3205 *
3206 * See also ::report_on_exception.
3207 *
3208 * There is also an instance level method to set this for a specific thread,
3209 * see #report_on_exception=.
3210 */
3211
3212static VALUE
3213rb_thread_s_report_exc_set(VALUE self, VALUE val)
3214{
3215 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3216 return val;
3217}
3218
3219
3220/*
3221 * call-seq:
3222 * Thread.ignore_deadlock -> true or false
3223 *
3224 * Returns the status of the global ``ignore deadlock'' condition.
3225 * The default is +false+, so that deadlock conditions are not ignored.
3226 *
3227 * See also ::ignore_deadlock=.
3228 *
3229 */
3230
3231static VALUE
3232rb_thread_s_ignore_deadlock(VALUE _)
3233{
3234 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3235}
3236
3237
3238/*
3239 * call-seq:
3240 * Thread.ignore_deadlock = boolean -> true or false
3241 *
3242 * Returns the new state.
3243 * When set to +true+, the VM will not check for deadlock conditions.
3244 * It is only useful to set this if your application can break a
3245 * deadlock condition via some other means, such as a signal.
3246 *
3247 * Thread.ignore_deadlock = true
3248 * queue = Thread::Queue.new
3249 *
3250 * trap(:SIGUSR1){queue.push "Received signal"}
3251 *
3252 * # raises fatal error unless ignoring deadlock
3253 * puts queue.pop
3254 *
3255 * See also ::ignore_deadlock.
3256 */
3257
3258static VALUE
3259rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3260{
3261 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3262 return val;
3263}
3264
3265
3266/*
3267 * call-seq:
3268 * thr.report_on_exception -> true or false
3269 *
3270 * Returns the status of the thread-local ``report on exception'' condition for
3271 * this +thr+.
3272 *
3273 * The default value when creating a Thread is the value of
3274 * the global flag Thread.report_on_exception.
3275 *
3276 * See also #report_on_exception=.
3277 *
3278 * There is also a class level method to set this for all new threads, see
3279 * ::report_on_exception=.
3280 */
3281
3282static VALUE
3283rb_thread_report_exc(VALUE thread)
3284{
3285 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3286}
3287
3288
3289/*
3290 * call-seq:
3291 * thr.report_on_exception= boolean -> true or false
3292 *
3293 * When set to +true+, a message is printed on $stderr if an exception
3294 * kills this +thr+. See ::report_on_exception for details.
3295 *
3296 * See also #report_on_exception.
3297 *
3298 * There is also a class level method to set this for all new threads, see
3299 * ::report_on_exception=.
3300 */
3301
3302static VALUE
3303rb_thread_report_exc_set(VALUE thread, VALUE val)
3304{
3305 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3306 return val;
3307}
3308
3309
3310/*
3311 * call-seq:
3312 * thr.group -> thgrp or nil
3313 *
3314 * Returns the ThreadGroup which contains the given thread.
3315 *
3316 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3317 */
3318
3319VALUE
3320rb_thread_group(VALUE thread)
3321{
3322 return rb_thread_ptr(thread)->thgroup;
3323}
3324
3325static const char *
3326thread_status_name(rb_thread_t *th, int detail)
3327{
3328 switch (th->status) {
3329 case THREAD_RUNNABLE:
3330 return th->to_kill ? "aborting" : "run";
3331 case THREAD_STOPPED_FOREVER:
3332 if (detail) return "sleep_forever";
3333 case THREAD_STOPPED:
3334 return "sleep";
3335 case THREAD_KILLED:
3336 return "dead";
3337 default:
3338 return "unknown";
3339 }
3340}
3341
3342static int
3343rb_threadptr_dead(rb_thread_t *th)
3344{
3345 return th->status == THREAD_KILLED;
3346}
3347
3348
3349/*
3350 * call-seq:
3351 * thr.status -> string, false or nil
3352 *
3353 * Returns the status of +thr+.
3354 *
3355 * [<tt>"sleep"</tt>]
3356 * Returned if this thread is sleeping or waiting on I/O
3357 * [<tt>"run"</tt>]
3358 * When this thread is executing
3359 * [<tt>"aborting"</tt>]
3360 * If this thread is aborting
3361 * [+false+]
3362 * When this thread is terminated normally
3363 * [+nil+]
3364 * If terminated with an exception.
3365 *
3366 * a = Thread.new { raise("die now") }
3367 * b = Thread.new { Thread.stop }
3368 * c = Thread.new { Thread.exit }
3369 * d = Thread.new { sleep }
3370 * d.kill #=> #<Thread:0x401b3678 aborting>
3371 * a.status #=> nil
3372 * b.status #=> "sleep"
3373 * c.status #=> false
3374 * d.status #=> "aborting"
3375 * Thread.current.status #=> "run"
3376 *
3377 * See also the instance methods #alive? and #stop?
3378 */
3379
3380static VALUE
3381rb_thread_status(VALUE thread)
3382{
3383 rb_thread_t *target_th = rb_thread_ptr(thread);
3384
3385 if (rb_threadptr_dead(target_th)) {
3386 if (!NIL_P(target_th->ec->errinfo) &&
3387 !FIXNUM_P(target_th->ec->errinfo)) {
3388 return Qnil;
3389 }
3390 else {
3391 return Qfalse;
3392 }
3393 }
3394 else {
3395 return rb_str_new2(thread_status_name(target_th, FALSE));
3396 }
3397}
3398
3399
3400/*
3401 * call-seq:
3402 * thr.alive? -> true or false
3403 *
3404 * Returns +true+ if +thr+ is running or sleeping.
3405 *
3406 * thr = Thread.new { }
3407 * thr.join #=> #<Thread:0x401b3fb0 dead>
3408 * Thread.current.alive? #=> true
3409 * thr.alive? #=> false
3410 *
3411 * See also #stop? and #status.
3412 */
3413
3414static VALUE
3415rb_thread_alive_p(VALUE thread)
3416{
3417 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3418}
3419
3420/*
3421 * call-seq:
3422 * thr.stop? -> true or false
3423 *
3424 * Returns +true+ if +thr+ is dead or sleeping.
3425 *
3426 * a = Thread.new { Thread.stop }
3427 * b = Thread.current
3428 * a.stop? #=> true
3429 * b.stop? #=> false
3430 *
3431 * See also #alive? and #status.
3432 */
3433
3434static VALUE
3435rb_thread_stop_p(VALUE thread)
3436{
3437 rb_thread_t *th = rb_thread_ptr(thread);
3438
3439 if (rb_threadptr_dead(th)) {
3440 return Qtrue;
3441 }
3442 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3443}
3444
3445/*
3446 * call-seq:
3447 * thr.name -> string
3448 *
3449 * Returns the name of the thread.
3450 */
3451
3452static VALUE
3453rb_thread_getname(VALUE thread)
3454{
3455 return rb_thread_ptr(thread)->name;
3456}
3457
3458/*
3459 * call-seq:
3460 * thr.name=(name) -> string
3461 *
3462 * Sets the given name on the Ruby thread.
3463 * On some platforms, the name may also be set for the underlying pthread and/or reported to the kernel.
3464 */
3465
3466static VALUE
3467rb_thread_setname(VALUE thread, VALUE name)
3468{
3469 rb_thread_t *target_th = rb_thread_ptr(thread);
3470
3471 if (!NIL_P(name)) {
3472 rb_encoding *enc;
3473 StringValueCStr(name);
3474 enc = rb_enc_get(name);
3475 if (!rb_enc_asciicompat(enc)) {
3476 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3477 rb_enc_name(enc));
3478 }
3479 name = rb_str_new_frozen(name);
3480 }
3481 target_th->name = name;
3482 if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
3483 native_set_another_thread_name(target_th->nt->thread_id, name);
3484 }
3485 return name;
3486}
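
/*
 * Usage (editorial sketch, not part of the original source; the inspect
 * output is abbreviated):
 *
 *   t = Thread.new { sleep }
 *   t.name = "worker-1"
 *   t.name  # => "worker-1"
 *   t.to_s  # => "#<Thread:0x...@worker-1 ... sleep_forever>"
 */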
3487
3488#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3489/*
3490 * call-seq:
3491 * thr.native_thread_id -> integer
3492 *
3493 * Returns the native thread ID which is used by the Ruby thread.
3494 *
3495 * The ID depends on the OS (it is not the POSIX thread ID returned by pthread_self(3)):
3496 * * On Linux it is TID returned by gettid(2).
3497 * On macOS it is the system-wide unique integral ID of the thread returned
3498 * by pthread_threadid_np(3).
3499 * * On FreeBSD it is the unique integral ID of the thread returned by
3500 * pthread_getthreadid_np(3).
3501 * * On Windows it is the thread identifier returned by GetThreadId().
3502 * * On other platforms, it raises NotImplementedError.
3503 *
3504 * NOTE:
3505 * If the thread is not yet associated with, or already dissociated from,
3506 * a native thread, it returns _nil_.
3507 * If the Ruby implementation uses an M:N thread model, the ID may change
3508 * depending on the timing.
3509 */
3510
3511static VALUE
3512rb_thread_native_thread_id(VALUE thread)
3513{
3514 rb_thread_t *target_th = rb_thread_ptr(thread);
3515 if (rb_threadptr_dead(target_th)) return Qnil;
3516 return native_thread_native_thread_id(target_th);
3517}
3518#else
3519# define rb_thread_native_thread_id rb_f_notimplement
3520#endif
3521
3522/*
3523 * call-seq:
3524 * thr.to_s -> string
3525 *
3526 * Dump the name, id, and status of _thr_ to a string.
3527 */
3528
3529static VALUE
3530rb_thread_to_s(VALUE thread)
3531{
3532 VALUE cname = rb_class_path(rb_obj_class(thread));
3533 rb_thread_t *target_th = rb_thread_ptr(thread);
3534 const char *status;
3535 VALUE str, loc;
3536
3537 status = thread_status_name(target_th, TRUE);
3538 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3539 if (!NIL_P(target_th->name)) {
3540 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3541 }
3542 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3543 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3544 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3545 }
3546 rb_str_catf(str, " %s>", status);
3547
3548 return str;
3549}
3550
3551/* variables for recursive traversals */
3552#define recursive_key id__recursive_key__
3553
3554static VALUE
3555threadptr_local_aref(rb_thread_t *th, ID id)
3556{
3557 if (id == recursive_key) {
3558 return th->ec->local_storage_recursive_hash;
3559 }
3560 else {
3561 VALUE val;
3562 struct rb_id_table *local_storage = th->ec->local_storage;
3563
3564 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3565 return val;
3566 }
3567 else {
3568 return Qnil;
3569 }
3570 }
3571}
3572
3573 VALUE
3574rb_thread_local_aref(VALUE thread, ID id)
3575{
3576 return threadptr_local_aref(rb_thread_ptr(thread), id);
3577}
3578
3579/*
3580 * call-seq:
3581 * thr[sym] -> obj or nil
3582 *
3583 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3584 * if not explicitly inside a Fiber), using either a symbol or a string name.
3585 * If the specified variable does not exist, returns +nil+.
3586 *
3587 * [
3588 * Thread.new { Thread.current["name"] = "A" },
3589 * Thread.new { Thread.current[:name] = "B" },
3590 * Thread.new { Thread.current["name"] = "C" }
3591 * ].each do |th|
3592 * th.join
3593 * puts "#{th.inspect}: #{th[:name]}"
3594 * end
3595 *
3596 * This will produce:
3597 *
3598 * #<Thread:0x00000002a54220 dead>: A
3599 * #<Thread:0x00000002a541a8 dead>: B
3600 * #<Thread:0x00000002a54130 dead>: C
3601 *
3602 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3603 * This confusion did not exist in Ruby 1.8 because
3604 * fibers have only been available since Ruby 1.9.
3605 * Ruby 1.9 chose to make these methods fiber-local in order to preserve
3606 * the following idiom for dynamic scope.
3607 *
3608 * def meth(newvalue)
3609 * begin
3610 * oldvalue = Thread.current[:name]
3611 * Thread.current[:name] = newvalue
3612 * yield
3613 * ensure
3614 * Thread.current[:name] = oldvalue
3615 * end
3616 * end
3617 *
3618 * The idiom may not work as dynamic scope if the methods are thread-local
3619 * and a given block switches fibers.
3620 *
3621 * f = Fiber.new {
3622 * meth(1) {
3623 * Fiber.yield
3624 * }
3625 * }
3626 * meth(2) {
3627 * f.resume
3628 * }
3629 * f.resume
3630 * p Thread.current[:name]
3631 * #=> nil if fiber-local
3632 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3633 *
3634 * For thread-local variables, please see #thread_variable_get and
3635 * #thread_variable_set.
3636 *
3637 */
3638
3639static VALUE
3640rb_thread_aref(VALUE thread, VALUE key)
3641{
3642 ID id = rb_check_id(&key);
3643 if (!id) return Qnil;
3644 return rb_thread_local_aref(thread, id);
3645}
3646
3647/*
3648 * call-seq:
3649 * thr.fetch(sym) -> obj
3650 * thr.fetch(sym) { } -> obj
3651 * thr.fetch(sym, default) -> obj
3652 *
3653 * Returns a fiber-local for the given key. If the key can't be
3654 * found, there are several options: With no other arguments, it will
3655 * raise a KeyError exception; if <i>default</i> is given, then that
3656 * will be returned; if the optional code block is specified, then
3657 * that will be run and its result returned. See Thread#[] and
3658 * Hash#fetch.
3659 */
3660static VALUE
3661rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3662{
3663 VALUE key, val;
3664 ID id;
3665 rb_thread_t *target_th = rb_thread_ptr(self);
3666 int block_given;
3667
3668 rb_check_arity(argc, 1, 2);
3669 key = argv[0];
3670
3671 block_given = rb_block_given_p();
3672 if (block_given && argc == 2) {
3673 rb_warn("block supersedes default value argument");
3674 }
3675
3676 id = rb_check_id(&key);
3677
3678 if (id == recursive_key) {
3679 return target_th->ec->local_storage_recursive_hash;
3680 }
3681 else if (id && target_th->ec->local_storage &&
3682 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3683 return val;
3684 }
3685 else if (block_given) {
3686 return rb_yield(key);
3687 }
3688 else if (argc == 1) {
3689 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3690 }
3691 else {
3692 return argv[1];
3693 }
3694}
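
/*
 * Usage (editorial sketch, not part of the original source):
 *
 *   Thread.current[:count] = 1
 *   Thread.current.fetch(:count)                      # => 1
 *   Thread.current.fetch(:missing, 0)                 # => 0
 *   Thread.current.fetch(:missing) { |k| "no #{k}" }  # => "no missing"
 *   Thread.current.fetch(:missing)                    # raises KeyError
 */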
3695
3696static VALUE
3697threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3698{
3699 if (id == recursive_key) {
3700 th->ec->local_storage_recursive_hash = val;
3701 return val;
3702 }
3703 else {
3704 struct rb_id_table *local_storage = th->ec->local_storage;
3705
3706 if (NIL_P(val)) {
3707 if (!local_storage) return Qnil;
3708 rb_id_table_delete(local_storage, id);
3709 return Qnil;
3710 }
3711 else {
3712 if (local_storage == NULL) {
3713 th->ec->local_storage = local_storage = rb_id_table_create(0);
3714 }
3715 rb_id_table_insert(local_storage, id, val);
3716 return val;
3717 }
3718 }
3719}
3720
3722rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3723{
3724 if (OBJ_FROZEN(thread)) {
3725 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3726 }
3727
3728 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3729}
3730
3731/*
3732 * call-seq:
3733 * thr[sym] = obj -> obj
3734 *
3735 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3736 * using either a symbol or a string.
3737 *
3738 * See also Thread#[].
3739 *
3740 * For thread-local variables, please see #thread_variable_set and
3741 * #thread_variable_get.
3742 */
3743
3744static VALUE
3745rb_thread_aset(VALUE self, VALUE id, VALUE val)
3746{
3747 return rb_thread_local_aset(self, rb_to_id(id), val);
3748}
3749
3750/*
3751 * call-seq:
3752 * thr.thread_variable_get(key) -> obj or nil
3753 *
3754 * Returns the value of a thread local variable that has been set. Note that
3755 * these are different from fiber local values. For fiber local values,
3756 * please see Thread#[] and Thread#[]=.
3757 *
3758 * Thread local values are carried along with threads, and do not respect
3759 * fibers. For example:
3760 *
3761 * Thread.new {
3762 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3763 * Thread.current["foo"] = "bar" # set a fiber local
3764 *
3765 * Fiber.new {
3766 * Fiber.yield [
3767 * Thread.current.thread_variable_get("foo"), # get the thread local
3768 * Thread.current["foo"], # get the fiber local
3769 * ]
3770 * }.resume
3771 * }.join.value # => ['bar', nil]
3772 *
3773 * The value "bar" is returned for the thread local, whereas nil is returned
3774 * for the fiber local. The fiber is executed in the same thread, so the
3775 * thread local values are available.
3776 */
3777
3778static VALUE
3779rb_thread_variable_get(VALUE thread, VALUE key)
3780{
3781 VALUE locals;
3782 VALUE symbol = rb_to_symbol(key);
3783
3784 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3785 return Qnil;
3786 }
3787 locals = rb_thread_local_storage(thread);
3788 return rb_hash_aref(locals, symbol);
3789}
3790
3791/*
3792 * call-seq:
3793 * thr.thread_variable_set(key, value)
3794 *
3795 * Sets a thread local with +key+ to +value+. Note that these are local to
3796 * threads, and not to fibers. Please see Thread#thread_variable_get and
3797 * Thread#[] for more information.
3798 */
3799
3800static VALUE
3801rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3802{
3803 VALUE locals;
3804
3805 if (OBJ_FROZEN(thread)) {
3806 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3807 }
3808
3809 locals = rb_thread_local_storage(thread);
3810 return rb_hash_aset(locals, rb_to_symbol(key), val);
3811}
3812
3813/*
3814 * call-seq:
3815 * thr.key?(sym) -> true or false
3816 *
3817 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3818 * variable.
3819 *
3820 * me = Thread.current
3821 * me[:oliver] = "a"
3822 * me.key?(:oliver) #=> true
3823 * me.key?(:stanley) #=> false
3824 */
3825
3826static VALUE
3827rb_thread_key_p(VALUE self, VALUE key)
3828{
3829 VALUE val;
3830 ID id = rb_check_id(&key);
3831 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3832
3833 if (!id || local_storage == NULL) {
3834 return Qfalse;
3835 }
3836 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
3837}
3838
3839static enum rb_id_table_iterator_result
3840thread_keys_i(ID key, VALUE value, void *ary)
3841{
3842 rb_ary_push((VALUE)ary, ID2SYM(key));
3843 return ID_TABLE_CONTINUE;
3844}
3845
3846 int
3847rb_thread_alone(void)
3848{
3849 // TODO
3850 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
3851}
3852
3853/*
3854 * call-seq:
3855 * thr.keys -> array
3856 *
3857 * Returns an array of the names of the fiber-local variables (as Symbols).
3858 *
3859 * thr = Thread.new do
3860 * Thread.current[:cat] = 'meow'
3861 * Thread.current["dog"] = 'woof'
3862 * end
3863 * thr.join #=> #<Thread:0x401b3f10 dead>
3864 * thr.keys #=> [:dog, :cat]
3865 */
3866
3867static VALUE
3868rb_thread_keys(VALUE self)
3869{
3870 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3871 VALUE ary = rb_ary_new();
3872
3873 if (local_storage) {
3874 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
3875 }
3876 return ary;
3877}
3878
3879static int
3880keys_i(VALUE key, VALUE value, VALUE ary)
3881{
3882 rb_ary_push(ary, key);
3883 return ST_CONTINUE;
3884}
3885
3886/*
3887 * call-seq:
3888 * thr.thread_variables -> array
3889 *
3890 * Returns an array of the names of the thread-local variables (as Symbols).
3891 *
3892 * thr = Thread.new do
3893 * Thread.current.thread_variable_set(:cat, 'meow')
3894 * Thread.current.thread_variable_set("dog", 'woof')
3895 * end
3896 * thr.join #=> #<Thread:0x401b3f10 dead>
3897 * thr.thread_variables #=> [:dog, :cat]
3898 *
3899 * Note that these are not fiber local variables. Please see Thread#[] and
3900 * Thread#thread_variable_get for more details.
3901 */
3902
3903static VALUE
3904rb_thread_variables(VALUE thread)
3905{
3906 VALUE locals;
3907 VALUE ary;
3908
3909 ary = rb_ary_new();
3910 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3911 return ary;
3912 }
3913 locals = rb_thread_local_storage(thread);
3914 rb_hash_foreach(locals, keys_i, ary);
3915
3916 return ary;
3917}
3918
3919/*
3920 * call-seq:
3921 * thr.thread_variable?(key) -> true or false
3922 *
3923 * Returns +true+ if the given string (or symbol) exists as a thread-local
3924 * variable.
3925 *
3926 * me = Thread.current
3927 * me.thread_variable_set(:oliver, "a")
3928 * me.thread_variable?(:oliver) #=> true
3929 * me.thread_variable?(:stanley) #=> false
3930 *
3931 * Note that these are not fiber local variables. Please see Thread#[] and
3932 * Thread#thread_variable_get for more details.
3933 */
3934
3935static VALUE
3936rb_thread_variable_p(VALUE thread, VALUE key)
3937{
3938 VALUE locals;
3939 VALUE symbol = rb_to_symbol(key);
3940
3941 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3942 return Qfalse;
3943 }
3944 locals = rb_thread_local_storage(thread);
3945
3946 return RBOOL(rb_hash_lookup(locals, symbol) != Qnil);
3947}
3948
3949/*
3950 * call-seq:
3951 * thr.priority -> integer
3952 *
3953 * Returns the priority of <i>thr</i>. The default is inherited from the
3954 * thread which created the new thread, or zero for the
3955 * initial main thread; higher-priority threads will run more frequently
3956 * than lower-priority threads (but lower-priority threads can also run).
3957 *
3958 * This is just a hint for the Ruby thread scheduler. It may be ignored on
3959 * some platforms.
3960 *
3961 * Thread.current.priority #=> 0
3962 */
3963
3964static VALUE
3965rb_thread_priority(VALUE thread)
3966{
3967 return INT2NUM(rb_thread_ptr(thread)->priority);
3968}
3969
3970
3971/*
3972 * call-seq:
3973 * thr.priority= integer -> thr
3974 *
3975 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3976 * will run more frequently than lower-priority threads (but lower-priority
3977 * threads can also run).
3978 *
3979 * This is just a hint for the Ruby thread scheduler. It may be ignored on
3980 * some platforms.
3981 *
3982 * count1 = count2 = 0
3983 * a = Thread.new do
3984 * loop { count1 += 1 }
3985 * end
3986 * a.priority = -1
3987 *
3988 * b = Thread.new do
3989 * loop { count2 += 1 }
3990 * end
3991 * b.priority = -2
3992 * sleep 1 #=> 1
3993 * count1 #=> 622504
3994 * count2 #=> 5832
3995 */
3996
3997static VALUE
3998rb_thread_priority_set(VALUE thread, VALUE prio)
3999{
4000 rb_thread_t *target_th = rb_thread_ptr(thread);
4001 int priority;
4002
4003#if USE_NATIVE_THREAD_PRIORITY
4004 target_th->priority = NUM2INT(prio);
4005 native_thread_apply_priority(th);
4006#else
4007 priority = NUM2INT(prio);
4008 if (priority > RUBY_THREAD_PRIORITY_MAX) {
4009 priority = RUBY_THREAD_PRIORITY_MAX;
4010 }
4011 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
4012 priority = RUBY_THREAD_PRIORITY_MIN;
4013 }
4014 target_th->priority = (int8_t)priority;
4015#endif
4016 return INT2NUM(target_th->priority);
4017}
4018
4019/* for IO */
4020
4021#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
4022
4023/*
4024 * several Unix platforms support file descriptors bigger than FD_SETSIZE
4025 * in select(2) system call.
4026 *
4027 * - Linux 2.2.12 (?)
4028 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
4029 * select(2) documents how to allocate fd_set dynamically.
4030 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
4031 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
4032 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
4033 * select(2) documents how to allocate fd_set dynamically.
4034 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
4035 * - Solaris 8 has select_large_fdset
4036 * - Mac OS X 10.7 (Lion)
4037 * select(2) returns EINVAL if nfds is greater than FD_SET_SIZE and
4038 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
4039 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
4040 *
4041 * When fd_set is not big enough to hold big file descriptors,
4042 * it should be allocated dynamically.
4043 * Note that this assumes fd_set is structured as a bitmap.
4044 *
4045 * rb_fd_init allocates the memory.
4046 * rb_fd_term frees the memory.
4047 * rb_fd_set may re-allocate the bitmap.
4048 *
4049 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
4050 */
4051
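/*
 * A minimal usage sketch (editorial, not part of the original source) of the
 * rb_fdset_t API described above, waiting for a descriptor that may be
 * larger than FD_SETSIZE:
 *
 *   rb_fdset_t set;
 *   struct timeval tv = { 1, 0 }; // one second
 *
 *   rb_fd_init(&set);
 *   rb_fd_set(fd, &set); // grows the bitmap as needed
 *   if (rb_fd_select(fd + 1, &set, NULL, NULL, &tv) > 0 &&
 *       rb_fd_isset(fd, &set)) {
 *       // fd is ready for reading
 *   }
 *   rb_fd_term(&set); // releases the heap-allocated bitmap
 */
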
4052void
4053 rb_fd_init(rb_fdset_t *fds)
4054{
4055 fds->maxfd = 0;
4056 fds->fdset = ALLOC(fd_set);
4057 FD_ZERO(fds->fdset);
4058}
4059
4060void
4061rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4062{
4063 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4064
4065 if (size < sizeof(fd_set))
4066 size = sizeof(fd_set);
4067 dst->maxfd = src->maxfd;
4068 dst->fdset = xmalloc(size);
4069 memcpy(dst->fdset, src->fdset, size);
4070}
4071
4072void
4073 rb_fd_term(rb_fdset_t *fds)
4074{
4075 xfree(fds->fdset);
4076 fds->maxfd = 0;
4077 fds->fdset = 0;
4078}
4079
4080void
4081 rb_fd_zero(rb_fdset_t *fds)
4082{
4083 if (fds->fdset)
4084 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4085}
4086
4087static void
4088rb_fd_resize(int n, rb_fdset_t *fds)
4089{
4090 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4091 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4092
4093 if (m < sizeof(fd_set)) m = sizeof(fd_set);
4094 if (o < sizeof(fd_set)) o = sizeof(fd_set);
4095
4096 if (m > o) {
4097 fds->fdset = xrealloc(fds->fdset, m);
4098 memset((char *)fds->fdset + o, 0, m - o);
4099 }
4100 if (n >= fds->maxfd) fds->maxfd = n + 1;
4101}
4102
4103void
4104rb_fd_set(int n, rb_fdset_t *fds)
4105{
4106 rb_fd_resize(n, fds);
4107 FD_SET(n, fds->fdset);
4108}
4109
4110void
4111rb_fd_clr(int n, rb_fdset_t *fds)
4112{
4113 if (n >= fds->maxfd) return;
4114 FD_CLR(n, fds->fdset);
4115}
4116
4117int
4118rb_fd_isset(int n, const rb_fdset_t *fds)
4119{
4120 if (n >= fds->maxfd) return 0;
4121 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4122}
4123
4124void
4125rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4126{
4127 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4128
4129 if (size < sizeof(fd_set)) size = sizeof(fd_set);
4130 dst->maxfd = max;
4131 dst->fdset = xrealloc(dst->fdset, size);
4132 memcpy(dst->fdset, src, size);
4133}
4134
4135void
4136rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4137{
4138 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4139
4140 if (size < sizeof(fd_set))
4141 size = sizeof(fd_set);
4142 dst->maxfd = src->maxfd;
4143 dst->fdset = xrealloc(dst->fdset, size);
4144 memcpy(dst->fdset, src->fdset, size);
4145}
4146
4147int
4148rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4149{
4150 fd_set *r = NULL, *w = NULL, *e = NULL;
4151 if (readfds) {
4152 rb_fd_resize(n - 1, readfds);
4153 r = rb_fd_ptr(readfds);
4154 }
4155 if (writefds) {
4156 rb_fd_resize(n - 1, writefds);
4157 w = rb_fd_ptr(writefds);
4158 }
4159 if (exceptfds) {
4160 rb_fd_resize(n - 1, exceptfds);
4161 e = rb_fd_ptr(exceptfds);
4162 }
4163 return select(n, r, w, e, timeout);
4164}
4165
4166#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4167
4168#undef FD_ZERO
4169#undef FD_SET
4170#undef FD_CLR
4171#undef FD_ISSET
4172
4173#define FD_ZERO(f) rb_fd_zero(f)
4174#define FD_SET(i, f) rb_fd_set((i), (f))
4175#define FD_CLR(i, f) rb_fd_clr((i), (f))
4176#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4177
4178#elif defined(_WIN32)
4179
4180void
4182{
4183 set->capa = FD_SETSIZE;
4184 set->fdset = ALLOC(fd_set);
4185 FD_ZERO(set->fdset);
4186}
4187
4188void
4189rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4190{
4191 rb_fd_init(dst);
4192 rb_fd_dup(dst, src);
4193}
4194
4195void
4197{
4198 xfree(set->fdset);
4199 set->fdset = NULL;
4200 set->capa = 0;
4201}
4202
4203void
4204rb_fd_set(int fd, rb_fdset_t *set)
4205{
4206 unsigned int i;
4207 SOCKET s = rb_w32_get_osfhandle(fd);
4208
4209 for (i = 0; i < set->fdset->fd_count; i++) {
4210 if (set->fdset->fd_array[i] == s) {
4211 return;
4212 }
4213 }
4214 if (set->fdset->fd_count >= (unsigned)set->capa) {
4215 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4216 set->fdset =
4217 rb_xrealloc_mul_add(
4218 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4219 }
4220 set->fdset->fd_array[set->fdset->fd_count++] = s;
4221}
4222
4223#undef FD_ZERO
4224#undef FD_SET
4225#undef FD_CLR
4226#undef FD_ISSET
4227
4228#define FD_ZERO(f) rb_fd_zero(f)
4229#define FD_SET(i, f) rb_fd_set((i), (f))
4230#define FD_CLR(i, f) rb_fd_clr((i), (f))
4231#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4232
4233#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4234
4235#endif
4236
4237#ifndef rb_fd_no_init
4238#define rb_fd_no_init(fds) (void)(fds)
4239#endif
4240
4241static int
4242wait_retryable(volatile int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4243{
4244 int r = *result;
4245 if (r < 0) {
4246 switch (errnum) {
4247 case EINTR:
4248#ifdef ERESTART
4249 case ERESTART:
4250#endif
4251 *result = 0;
4252 if (rel && hrtime_update_expire(rel, end)) {
4253 *rel = 0;
4254 }
4255 return TRUE;
4256 }
4257 return FALSE;
4258 }
4259 else if (r == 0) {
4260 /* check for spurious wakeup */
4261 if (rel) {
4262 return !hrtime_update_expire(rel, end);
4263 }
4264 return TRUE;
4265 }
4266 return FALSE;
4267}
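
/*
 * Illustrative pattern (a sketch; do_select below is the real caller):
 * a blocking call is retried while wait_retryable() reports that the
 * failure was EINTR/ERESTART, or that a zero result was a spurious
 * wakeup before the deadline expired:
 *
 *   do {
 *       result = some_blocking_call(...);   // hypothetical callee
 *   } while (wait_retryable(&result, errno, to, end));
 */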

struct select_set {
    int max;
    rb_thread_t *th;
    rb_fdset_t *rset;
    rb_fdset_t *wset;
    rb_fdset_t *eset;
    rb_fdset_t orig_rset;
    rb_fdset_t orig_wset;
    rb_fdset_t orig_eset;
    struct timeval *timeout;
};

static VALUE
select_set_free(VALUE p)
{
    struct select_set *set = (struct select_set *)p;

    rb_fd_term(&set->orig_rset);
    rb_fd_term(&set->orig_wset);
    rb_fd_term(&set->orig_eset);

    return Qfalse;
}

static VALUE
do_select(VALUE p)
{
    struct select_set *set = (struct select_set *)p;
    volatile int result = 0;
    int lerrno;
    rb_hrtime_t *to, rel, end = 0;

    timeout_prepare(&to, &rel, &end, set->timeout);
    volatile rb_hrtime_t endtime = end;
#define restore_fdset(dst, src) \
    ((dst) ? rb_fd_dup(dst, src) : (void)0)
#define do_select_update() \
    (restore_fdset(set->rset, &set->orig_rset), \
     restore_fdset(set->wset, &set->orig_wset), \
     restore_fdset(set->eset, &set->orig_eset), \
     TRUE)

    do {
        lerrno = 0;

        BLOCKING_REGION(set->th, {
            struct timeval tv;

            if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
                result = native_fd_select(set->max,
                                          set->rset, set->wset, set->eset,
                                          rb_hrtime2timeval(&tv, to), set->th);
                if (result < 0) lerrno = errno;
            }
        }, ubf_select, set->th, TRUE);

        RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
    } while (wait_retryable(&result, lerrno, to, endtime) && do_select_update());

    if (result < 0) {
        errno = lerrno;
    }

    return (VALUE)result;
}

int
rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
                    struct timeval *timeout)
{
    struct select_set set;

    set.th = GET_THREAD();
    RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
    set.max = max;
    set.rset = read;
    set.wset = write;
    set.eset = except;
    set.timeout = timeout;

    if (!set.rset && !set.wset && !set.eset) {
        if (!timeout) {
            rb_thread_sleep_forever();
            return 0;
        }
        rb_thread_wait_for(*timeout);
        return 0;
    }

#define fd_init_copy(f) do { \
        if (set.f) { \
            rb_fd_resize(set.max - 1, set.f); \
            if (&set.orig_##f != set.f) { /* sigwait_fd */ \
                rb_fd_init_copy(&set.orig_##f, set.f); \
            } \
        } \
        else { \
            rb_fd_no_init(&set.orig_##f); \
        } \
    } while (0)
    fd_init_copy(rset);
    fd_init_copy(wset);
    fd_init_copy(eset);
#undef fd_init_copy

    return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
}
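
/*
 * A minimal calling sketch (sock_fd is assumed to be some valid open
 * descriptor; it is not defined in this file): wait until sock_fd
 * becomes readable, with no timeout, releasing the GVL while blocked:
 *
 *   rb_fdset_t rfds;
 *   rb_fd_init(&rfds);
 *   rb_fd_set(sock_fd, &rfds);
 *   int n = rb_thread_fd_select(sock_fd + 1, &rfds, NULL, NULL, NULL);
 *   rb_fd_term(&rfds);
 */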

#ifdef USE_POLL

/* The same as the Linux kernel.  TODO: make a platform-independent definition. */
#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

#ifndef POLLERR_SET /* defined for FreeBSD for now */
#  define POLLERR_SET (0)
#endif

static int
wait_for_single_fd_blocking_region(rb_thread_t *th, struct pollfd *fds, nfds_t nfds,
                                   rb_hrtime_t *const to, volatile int *lerrno)
{
    struct timespec ts;
    volatile int result = 0;

    *lerrno = 0;
    BLOCKING_REGION(th, {
        if (!RUBY_VM_INTERRUPTED(th->ec)) {
            result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
            if (result < 0) *lerrno = errno;
        }
    }, ubf_select, th, TRUE);
    return result;
}

/*
 * returns a mask of events
 */
int
rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
{
    struct pollfd fds[1] = {{
        .fd = fd,
        .events = (short)events,
        .revents = 0,
    }};
    volatile int result = 0;
    nfds_t nfds;
    struct waiting_fd wfd;
    enum ruby_tag_type state;
    volatile int lerrno;

    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_ec_thread_ptr(ec);

    thread_io_setup_wfd(th, fd, &wfd);

    if (timeout == NULL && thread_io_wait_events(th, fd, events, NULL)) {
        // fd is readable
        state = 0;
        fds[0].revents = events;
        errno = 0;
    }
    else {
        EC_PUSH_TAG(wfd.th->ec);
        if ((state = EC_EXEC_TAG()) == TAG_NONE) {
            rb_hrtime_t *to, rel, end = 0;
            RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
            timeout_prepare(&to, &rel, &end, timeout);
            do {
                nfds = numberof(fds);
                result = wait_for_single_fd_blocking_region(wfd.th, fds, nfds, to, &lerrno);

                RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
            } while (wait_retryable(&result, lerrno, to, end));
        }
        EC_POP_TAG();
    }

    thread_io_wake_pending_closer(&wfd);

    if (state) {
        EC_JUMP_TAG(wfd.th->ec, state);
    }

    if (result < 0) {
        errno = lerrno;
        return -1;
    }

    if (fds[0].revents & POLLNVAL) {
        errno = EBADF;
        return -1;
    }

    /*
     * POLLIN and POLLOUT have different meanings from select(2)'s
     * read/write bits, so we need to fix the result up.
     */
    result = 0;
    if (fds[0].revents & POLLIN_SET)
        result |= RB_WAITFD_IN;
    if (fds[0].revents & POLLOUT_SET)
        result |= RB_WAITFD_OUT;
    if (fds[0].revents & POLLEX_SET)
        result |= RB_WAITFD_PRI;

    /* all requested events are ready if there is an error */
    if (fds[0].revents & POLLERR_SET)
        result |= events;

    return result;
}
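
/*
 * Single-fd sketch (illustrative; fd is assumed to be an open
 * descriptor): block until fd is readable or 1.5 seconds elapse, then
 * inspect the returned event mask:
 *
 *   struct timeval tv = { 1, 500000 };
 *   int revents = rb_thread_wait_for_single_fd(fd, RB_WAITFD_IN, &tv);
 *   if (revents < 0)                { ... error, errno is set ... }
 *   else if (revents & RB_WAITFD_IN) { ... readable ... }
 *   else                            { ... timed out ... }
 */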
#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
struct select_args {
    union {
        int fd;
        int error;
    } as;
    rb_fdset_t *read;
    rb_fdset_t *write;
    rb_fdset_t *except;
    struct waiting_fd wfd;
    struct timeval *tv;
};

static VALUE
select_single(VALUE ptr)
{
    struct select_args *args = (struct select_args *)ptr;
    int r;

    r = rb_thread_fd_select(args->as.fd + 1,
                            args->read, args->write, args->except, args->tv);
    if (r == -1)
        args->as.error = errno;
    if (r > 0) {
        r = 0;
        if (args->read && rb_fd_isset(args->as.fd, args->read))
            r |= RB_WAITFD_IN;
        if (args->write && rb_fd_isset(args->as.fd, args->write))
            r |= RB_WAITFD_OUT;
        if (args->except && rb_fd_isset(args->as.fd, args->except))
            r |= RB_WAITFD_PRI;
    }
    return (VALUE)r;
}

static VALUE
select_single_cleanup(VALUE ptr)
{
    struct select_args *args = (struct select_args *)ptr;

    thread_io_wake_pending_closer(&args->wfd);
    if (args->read) rb_fd_term(args->read);
    if (args->write) rb_fd_term(args->write);
    if (args->except) rb_fd_term(args->except);

    return (VALUE)-1;
}

static rb_fdset_t *
init_set_fd(int fd, rb_fdset_t *fds)
{
    if (fd < 0) {
        return 0;
    }
    rb_fd_init(fds);
    rb_fd_set(fd, fds);

    return fds;
}

int
rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
{
    rb_fdset_t rfds, wfds, efds;
    struct select_args args;
    int r;
    VALUE ptr = (VALUE)&args;
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_ec_thread_ptr(ec);

    args.as.fd = fd;
    args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
    args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
    args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
    args.tv = timeout;
    thread_io_setup_wfd(th, fd, &args.wfd);

    r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
    if (r == -1)
        errno = args.as.error;

    return r;
}
#endif /* ! USE_POLL */

/*
 * for GC
 */

#ifdef USE_CONSERVATIVE_STACK_END
void
rb_gc_set_stack_end(VALUE **stack_end_p)
{
    VALUE stack_end;
COMPILER_WARNING_PUSH
#if __has_warning("-Wdangling-pointer")
COMPILER_WARNING_IGNORED(-Wdangling-pointer);
#endif
    *stack_end_p = &stack_end;
COMPILER_WARNING_POP
}
#endif

/*
 *
 */

void
rb_threadptr_check_signal(rb_thread_t *mth)
{
    /* mth must be main_thread */
    if (rb_signal_buff_size() > 0) {
        /* wakeup main thread */
        threadptr_trap_interrupt(mth);
    }
}

static void
async_bug_fd(const char *mesg, int errno_arg, int fd)
{
    char buff[64];
    size_t n = strlcpy(buff, mesg, sizeof(buff));
    if (n < sizeof(buff)-3) {
        ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
    }
    rb_async_bug_errno(buff, errno_arg);
}

/* VM-dependent API is not available for this function */
static int
consume_communication_pipe(int fd)
{
#if USE_EVENTFD
    uint64_t buff[1];
#else
    /* the buffer can be shared because nothing else refers to it */
    static char buff[1024];
#endif
    ssize_t result;
    int ret = FALSE; /* for rb_sigwait_sleep */

    while (1) {
        result = read(fd, buff, sizeof(buff));
#if USE_EVENTFD
        RUBY_DEBUG_LOG("result:%d buff:%lu", (int)result, (unsigned long)buff[0]);
#else
        RUBY_DEBUG_LOG("result:%d", (int)result);
#endif
        if (result > 0) {
            ret = TRUE;
            if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
                return ret;
            }
        }
        else if (result == 0) {
            return ret;
        }
        else if (result < 0) {
            int e = errno;
            switch (e) {
              case EINTR:
                continue; /* retry */
              case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
              case EWOULDBLOCK:
#endif
                return ret;
              default:
                async_bug_fd("consume_communication_pipe: read", e, fd);
            }
        }
    }
}

void
rb_thread_stop_timer_thread(void)
{
    if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
        native_reset_timer_thread();
    }
}

void
rb_thread_reset_timer_thread(void)
{
    native_reset_timer_thread();
}

void
rb_thread_start_timer_thread(void)
{
    system_working = 1;
    rb_thread_create_timer_thread();
}

static int
clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
{
    int i;
    VALUE coverage = (VALUE)val;
    VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
    VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);

    if (lines) {
        if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
            rb_ary_clear(lines);
        }
        else {
            int i;
            for (i = 0; i < RARRAY_LEN(lines); i++) {
                if (RARRAY_AREF(lines, i) != Qnil)
                    RARRAY_ASET(lines, i, INT2FIX(0));
            }
        }
    }
    if (branches) {
        VALUE counters = RARRAY_AREF(branches, 1);
        for (i = 0; i < RARRAY_LEN(counters); i++) {
            RARRAY_ASET(counters, i, INT2FIX(0));
        }
    }

    return ST_CONTINUE;
}

void
rb_clear_coverages(void)
{
    VALUE coverages = rb_get_coverages();
    if (RTEST(coverages)) {
        rb_hash_foreach(coverages, clear_coverage_i, 0);
    }
}

#if defined(HAVE_WORKING_FORK)

static void
rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
{
    rb_thread_t *i = 0;
    rb_vm_t *vm = th->vm;
    rb_ractor_t *r = th->ractor;
    vm->ractor.main_ractor = r;
    vm->ractor.main_thread = th;
    r->threads.main = th;
    r->status_ = ractor_created;

    thread_sched_atfork(TH_SCHED(th));
    ubf_list_atfork();

    // OK. Only this thread accesses:
    ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
        ccan_list_for_each(&r->threads.set, i, lt_node) {
            atfork(i, th);
        }
    }
    rb_vm_living_threads_init(vm);

    rb_ractor_atfork(vm, th);
    rb_vm_postponed_job_atfork();

    /* may be held by RJIT threads in parent */
    rb_native_mutex_initialize(&vm->workqueue_lock);

    /* may be held by any thread in parent */
    rb_native_mutex_initialize(&th->interrupt_lock);
    ccan_list_head_init(&th->interrupt_exec_tasks);

    vm->fork_gen++;
    rb_ractor_sleeper_threads_clear(th->ractor);
    rb_clear_coverages();

    // restart the timer thread (timer threads access `vm->waitpid_lock` and so on)
    rb_thread_reset_timer_thread();
    rb_thread_start_timer_thread();

    VM_ASSERT(vm->ractor.blocking_cnt == 0);
    VM_ASSERT(vm->ractor.cnt == 1);
}

static void
terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
{
    if (th != current_th) {
        rb_mutex_abandon_keeping_mutexes(th);
        rb_mutex_abandon_locking_mutex(th);
        thread_cleanup_func(th, TRUE);
    }
}

void rb_fiber_atfork(rb_thread_t *);
void
rb_thread_atfork(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_threadptr_pending_interrupt_clear(th);
    rb_thread_atfork_internal(th, terminate_atfork_i);
    th->join_list = NULL;
    rb_fiber_atfork(th);

    /* We don't want to reproduce CVE-2003-0900. */
    rb_reset_random_seed();
}
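
/*
 * For example (a behavioral sketch, not code from this file): in
 *
 *   pid = fork { ... }    # parent runs many Ruby threads
 *
 * the child process runs the block with only the forking thread alive;
 * rb_thread_atfork() above runs in the child and discards every other
 * thread via terminate_atfork_i().
 */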

static void
terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
{
    if (th != current_th) {
        thread_cleanup_func_before_exec(th);
    }
}

void
rb_thread_atfork_before_exec(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
}
#else
void
rb_thread_atfork(void)
{
}

void
rb_thread_atfork_before_exec(void)
{
}
#endif

struct thgroup {
    int enclosed;
};

static const rb_data_type_t thgroup_data_type = {
    "thgroup",
    {
        0,
        0,
        NULL, // No external memory to report
    },
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
};

/*
 * Document-class: ThreadGroup
 *
 *  ThreadGroup provides a means of keeping track of a number of threads as a
 *  group.
 *
 *  A given Thread object can only belong to one ThreadGroup at a time; adding
 *  a thread to a new group will remove it from any previous group.
 *
 *  Newly created threads belong to the same group as the thread from which they
 *  were created.
 */

/*
 * Document-const: Default
 *
 *  The default ThreadGroup created when Ruby starts; all Threads belong to it
 *  by default.
 */
static VALUE
thgroup_s_alloc(VALUE klass)
{
    VALUE group;
    struct thgroup *data;

    group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
    data->enclosed = 0;

    return group;
}

/*
 *  call-seq:
 *     thgrp.list   -> array
 *
 *  Returns an array of all existing Thread objects that belong to this group.
 *
 *     ThreadGroup::Default.list   #=> [#<Thread:0x401bdf4c run>]
 */

static VALUE
thgroup_list(VALUE group)
{
    VALUE ary = rb_ary_new();
    rb_thread_t *th = 0;
    rb_ractor_t *r = GET_RACTOR();

    ccan_list_for_each(&r->threads.set, th, lt_node) {
        if (th->thgroup == group) {
            rb_ary_push(ary, th->self);
        }
    }
    return ary;
}


/*
 *  call-seq:
 *     thgrp.enclose   -> thgrp
 *
 *  Prevents threads from being added to or removed from the receiving
 *  ThreadGroup.
 *
 *  New threads can still be started in an enclosed ThreadGroup.
 *
 *     ThreadGroup::Default.enclose        #=> #<ThreadGroup:0x4029d914>
 *     thr = Thread.new { Thread.stop }    #=> #<Thread:0x402a7210 sleep>
 *     tg = ThreadGroup.new                #=> #<ThreadGroup:0x402752d4>
 *     tg.add thr
 *     #=> ThreadError: can't move from the enclosed thread group
 */

static VALUE
thgroup_enclose(VALUE group)
{
    struct thgroup *data;

    TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
    data->enclosed = 1;

    return group;
}


/*
 *  call-seq:
 *     thgrp.enclosed?   -> true or false
 *
 *  Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
 */

static VALUE
thgroup_enclosed_p(VALUE group)
{
    struct thgroup *data;

    TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
    return RBOOL(data->enclosed);
}


/*
 *  call-seq:
 *     thgrp.add(thread)   -> thgrp
 *
 *  Adds the given +thread+ to this group, removing it from any other
 *  group to which it may have previously been a member.
 *
 *     puts "Initial group is #{ThreadGroup::Default.list}"
 *     tg = ThreadGroup.new
 *     t1 = Thread.new { sleep }
 *     t2 = Thread.new { sleep }
 *     puts "t1 is #{t1}"
 *     puts "t2 is #{t2}"
 *     tg.add(t1)
 *     puts "Initial group now #{ThreadGroup::Default.list}"
 *     puts "tg group now #{tg.list}"
 *
 *  This will produce:
 *
 *     Initial group is #<Thread:0x401bdf4c>
 *     t1 is #<Thread:0x401b3c90>
 *     t2 is #<Thread:0x401b3c18>
 *     Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
 *     tg group now #<Thread:0x401b3c90>
 */

static VALUE
thgroup_add(VALUE group, VALUE thread)
{
    rb_thread_t *target_th = rb_thread_ptr(thread);
    struct thgroup *data;

    if (OBJ_FROZEN(group)) {
        rb_raise(rb_eThreadError, "can't move to the frozen thread group");
    }
    TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
    if (data->enclosed) {
        rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
    }

    if (OBJ_FROZEN(target_th->thgroup)) {
        rb_raise(rb_eThreadError, "can't move from the frozen thread group");
    }
    TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
    if (data->enclosed) {
        rb_raise(rb_eThreadError,
                 "can't move from the enclosed thread group");
    }

    target_th->thgroup = group;
    return group;
}

/*
 * Document-class: ThreadShield
 */
static void
thread_shield_mark(void *ptr)
{
    rb_gc_mark((VALUE)ptr);
}

static const rb_data_type_t thread_shield_data_type = {
    "thread_shield",
    {thread_shield_mark, 0, 0,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};

static VALUE
thread_shield_alloc(VALUE klass)
{
    return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
}

#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
static inline unsigned int
rb_thread_shield_waiting(VALUE b)
{
    return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
}

static inline void
rb_thread_shield_waiting_inc(VALUE b)
{
    unsigned int w = rb_thread_shield_waiting(b);
    w++;
    if (w > THREAD_SHIELD_WAITING_MAX)
        rb_raise(rb_eRuntimeError, "waiting count overflow");
    RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
    RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
}

static inline void
rb_thread_shield_waiting_dec(VALUE b)
{
    unsigned int w = rb_thread_shield_waiting(b);
    if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
    w--;
    RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
    RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
}

VALUE
rb_thread_shield_new(void)
{
    VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
    rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
    return thread_shield;
}

bool
rb_thread_shield_owned(VALUE self)
{
    VALUE mutex = GetThreadShieldPtr(self);
    if (!mutex) return false;

    rb_mutex_t *m = mutex_ptr(mutex);

    return m->fiber == GET_EC()->fiber_ptr;
}

/*
 * Wait on a thread shield.
 *
 * Returns
 *  true:  acquired the thread shield
 *  false: the thread shield was destroyed and no other threads are waiting
 *  nil:   the thread shield was destroyed but is still in use
 */
VALUE
rb_thread_shield_wait(VALUE self)
{
    VALUE mutex = GetThreadShieldPtr(self);
    rb_mutex_t *m;

    if (!mutex) return Qfalse;
    m = mutex_ptr(mutex);
    if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
    rb_thread_shield_waiting_inc(self);
    rb_mutex_lock(mutex);
    rb_thread_shield_waiting_dec(self);
    if (DATA_PTR(self)) return Qtrue;
    rb_mutex_unlock(mutex);
    return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
}

static VALUE
thread_shield_get_mutex(VALUE self)
{
    VALUE mutex = GetThreadShieldPtr(self);
    if (!mutex)
        rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
    return mutex;
}

/*
 * Release a thread shield, and return true if it has waiting threads.
 */
VALUE
rb_thread_shield_release(VALUE self)
{
    VALUE mutex = thread_shield_get_mutex(self);
    rb_mutex_unlock(mutex);
    return RBOOL(rb_thread_shield_waiting(self) > 0);
}

/*
 * Release and destroy a thread shield, and return true if it has waiting threads.
 */
VALUE
rb_thread_shield_destroy(VALUE self)
{
    VALUE mutex = thread_shield_get_mutex(self);
    DATA_PTR(self) = 0;
    rb_mutex_unlock(mutex);
    return RBOOL(rb_thread_shield_waiting(self) > 0);
}
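
/*
 * Intended usage (a sketch inferred from the API above): the creating
 * thread holds the shield while it works; other threads wait on it and
 * learn from the return value whether the work completed or the shield
 * was torn down:
 *
 *   VALUE shield = rb_thread_shield_new();   // creator owns the lock
 *   // ... other threads: rb_thread_shield_wait(shield) blocks here ...
 *   rb_thread_shield_destroy(shield);        // waiters wake, get false/nil
 */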

static VALUE
threadptr_recursive_hash(rb_thread_t *th)
{
    return th->ec->local_storage_recursive_hash;
}

static void
threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
{
    th->ec->local_storage_recursive_hash = hash;
}

ID rb_frame_last_func(void);

/*
 * Returns the current "recursive list" used to detect recursion.
 * This list is a hash table, unique for the current thread and for
 * the current __callee__.
 */

static VALUE
recursive_list_access(VALUE sym)
{
    rb_thread_t *th = GET_THREAD();
    VALUE hash = threadptr_recursive_hash(th);
    VALUE list;
    if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
        hash = rb_ident_hash_new();
        threadptr_recursive_hash_set(th, hash);
        list = Qnil;
    }
    else {
        list = rb_hash_aref(hash, sym);
    }
    if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
        list = rb_ident_hash_new();
        rb_hash_aset(hash, sym, list);
    }
    return list;
}

/*
 * Returns true if and only if obj (or the pair <obj, paired_obj>) is already
 * in the recursion list.
 * Assumes the recursion list is valid.
 */

static bool
recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
{
#if SIZEOF_LONG == SIZEOF_VOIDP
  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
  #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
    rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
#endif

    VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
    if (UNDEF_P(pair_list))
        return false;
    if (paired_obj_id) {
        if (!RB_TYPE_P(pair_list, T_HASH)) {
            if (!OBJ_ID_EQL(paired_obj_id, pair_list))
                return false;
        }
        else {
            if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
                return false;
        }
    }
    return true;
}

/*
 * Pushes obj (or the pair <obj, paired_obj>) onto the recursion list.
 * For a single obj, it sets list[obj] to Qtrue.
 * For a pair, it sets list[obj] to paired_obj_id if possible,
 * otherwise list[obj] becomes a hash like:
 *   {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
 * Assumes the recursion list is valid.
 */

static void
recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
{
    VALUE pair_list;

    if (!paired_obj) {
        rb_hash_aset(list, obj, Qtrue);
    }
    else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
        rb_hash_aset(list, obj, paired_obj);
    }
    else {
        if (!RB_TYPE_P(pair_list, T_HASH)){
            VALUE other_paired_obj = pair_list;
            pair_list = rb_hash_new();
            rb_hash_aset(pair_list, other_paired_obj, Qtrue);
            rb_hash_aset(list, obj, pair_list);
        }
        rb_hash_aset(pair_list, paired_obj, Qtrue);
    }
}

/*
 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
 * For a pair, if list[obj] is a hash, then paired_obj_id is
 * removed from the hash and no attempt is made to simplify
 * list[obj] from {only_one_paired_id => true} to only_one_paired_id.
 * Assumes the recursion list is valid.
 */

static int
recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
{
    if (paired_obj) {
        VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
        if (UNDEF_P(pair_list)) {
            return 0;
        }
        if (RB_TYPE_P(pair_list, T_HASH)) {
            rb_hash_delete_entry(pair_list, paired_obj);
            if (!RHASH_EMPTY_P(pair_list)) {
                return 1; /* keep the hash until it is empty */
            }
        }
    }
    rb_hash_delete_entry(list, obj);
    return 1;
}

struct exec_recursive_params {
    VALUE (*func) (VALUE, VALUE, int);
    VALUE list;
    VALUE obj;
    VALUE pairid;
    VALUE arg;
};

static VALUE
exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
{
    struct exec_recursive_params *p = (void *)data;
    return (*p->func)(p->obj, p->arg, FALSE);
}

/*
 * Calls func(obj, arg, recursive), where recursive is non-zero if the
 * current method is called recursively on obj, or on the pair <obj, pairid>.
 * If outer is 0, then the innermost func will be called with recursive set
 * to true; otherwise the outermost func will be called. In the latter case,
 * all inner calls to func are short-circuited by throw.
 * Implementation details: the value thrown is the recursive list, which is
 * specific to the current method and unlikely to be caught anywhere else.
 * list[recursive_key] is used as a flag for the outermost call.
 */

static VALUE
exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
{
    VALUE result = Qundef;
    const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
    struct exec_recursive_params p;
    int outermost;
    p.list = recursive_list_access(sym);
    p.obj = obj;
    p.pairid = pairid;
    p.arg = arg;
    outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);

    if (recursive_check(p.list, p.obj, pairid)) {
        if (outer && !outermost) {
            rb_throw_obj(p.list, p.list);
        }
        return (*func)(obj, arg, TRUE);
    }
    else {
        enum ruby_tag_type state;

        p.func = func;

        if (outermost) {
            recursive_push(p.list, ID2SYM(recursive_key), 0);
            recursive_push(p.list, p.obj, p.pairid);
            result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
            if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
            if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
            if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
            if (result == p.list) {
                result = (*func)(obj, arg, TRUE);
            }
        }
        else {
            volatile VALUE ret = Qundef;
            recursive_push(p.list, p.obj, p.pairid);
            EC_PUSH_TAG(GET_EC());
            if ((state = EC_EXEC_TAG()) == TAG_NONE) {
                ret = (*func)(obj, arg, FALSE);
            }
            EC_POP_TAG();
            if (!recursive_pop(p.list, p.obj, p.pairid)) {
                goto invalid;
            }
            if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
            result = ret;
        }
    }
    *(volatile struct exec_recursive_params *)&p;
    return result;

  invalid:
    rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
             "for %+"PRIsVALUE" in %+"PRIsVALUE,
             sym, rb_thread_current());
    UNREACHABLE_RETURN(Qundef);
}

/*
 * Calls func(obj, arg, recursive), where recursive is non-zero if the
 * current method is called recursively on obj.
 */

VALUE
rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
{
    return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
}
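
/*
 * Typical use (a sketch of the pattern, e.g. as Array#inspect uses this
 * API to print "[...]" for self-referencing arrays; inspect_body is a
 * hypothetical name):
 *
 *   static VALUE
 *   inspect_body(VALUE ary, VALUE arg, int recur)
 *   {
 *       if (recur) return rb_usascii_str_new_cstr("[...]");
 *       // ... build the normal representation ...
 *   }
 *
 *   // rb_exec_recursive(inspect_body, ary, 0);
 */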

/*
 * Calls func(obj, arg, recursive), where recursive is non-zero if the
 * current method is called recursively on the ordered pair <obj, paired_obj>.
 */

VALUE
rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
{
    return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
}

/*
 * If recursion is detected on the current method and obj, the outermost
 * func will be called with (obj, arg, true). All inner calls to func will
 * be short-circuited using throw.
 */

VALUE
rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
{
    return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
}

VALUE
rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
{
    return exec_recursive(func, obj, 0, arg, 1, mid);
}

/*
 * If recursion is detected on the current method, obj and paired_obj,
 * the outermost func will be called with (obj, arg, true). All inner
 * calls to func will be short-circuited using throw.
 */

VALUE
rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
{
    return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
}

/*
 *  call-seq:
 *     thread.backtrace    -> array or nil
 *
 *  Returns the current backtrace of the target thread.
 *
 */

static VALUE
rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
{
    return rb_vm_thread_backtrace(argc, argv, thval);
}

/* call-seq:
 *  thread.backtrace_locations(*args)   -> array or nil
 *
 * Returns the execution stack for the target thread---an array containing
 * backtrace location objects.
 *
 * See Thread::Backtrace::Location for more information.
 *
 * This method behaves similarly to Kernel#caller_locations except it applies
 * to a specific thread.
 */
static VALUE
rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
{
    return rb_vm_thread_backtrace_locations(argc, argv, thval);
}

void
Init_Thread_Mutex(void)
{
    rb_thread_t *th = GET_THREAD();

    rb_native_mutex_initialize(&th->vm->workqueue_lock);
    rb_native_mutex_initialize(&th->interrupt_lock);
}

/*
 *  Document-class: ThreadError
 *
 *  Raised when an invalid operation is attempted on a thread.
 *
 *  For example, when no other thread has been started:
 *
 *     Thread.stop
 *
 *  This raises the following exception:
 *
 *     ThreadError: stopping only thread
 *     note: use sleep to stop forever
 */

void
Init_Thread(void)
{
    VALUE cThGroup;
    rb_thread_t *th = GET_THREAD();

    sym_never = ID2SYM(rb_intern_const("never"));
    sym_immediate = ID2SYM(rb_intern_const("immediate"));
    sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));

    rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
    rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
    rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
    rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
    rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
    rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
    rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
    rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
    rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
    rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
    rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
    rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
    rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
    rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
    rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
    rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
    rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
    rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
    rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);

    rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
    rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
    rb_define_method(rb_cThread, "join", thread_join_m, -1);
    rb_define_method(rb_cThread, "value", thread_value, 0);
    rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "run", rb_thread_run, 0);
    rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
    rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
    rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
    rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
    rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
    rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
    rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
    rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
    rb_define_method(rb_cThread, "status", rb_thread_status, 0);
    rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
    rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
    rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
    rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
    rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
    rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
    rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
    rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
    rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
    rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
    rb_define_method(rb_cThread, "group", rb_thread_group, 0);
    rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
    rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);

    rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
    rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
    rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
    rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
    rb_define_alias(rb_cThread, "inspect", "to_s");

    rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
                                     "stream closed in another thread");

    cThGroup = rb_define_class("ThreadGroup", rb_cObject);
    rb_define_alloc_func(cThGroup, thgroup_s_alloc);
    rb_define_method(cThGroup, "list", thgroup_list, 0);
    rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
    rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
    rb_define_method(cThGroup, "add", thgroup_add, 1);

    const char * ptr = getenv("RUBY_THREAD_TIMESLICE");

    if (ptr) {
        long quantum = strtol(ptr, NULL, 0);
        if (quantum > 0 && quantum <= UINT32_MAX) {
            thread_default_quantum_ms = (uint32_t)quantum;
        }
        else if (0) {
            fprintf(stderr, "Ignored RUBY_THREAD_TIMESLICE=%s\n", ptr);
        }
    }
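
    /*
     * e.g. running with RUBY_THREAD_TIMESLICE=50 requests a 50ms scheduling
     * quantum (thread_default_quantum_ms is in milliseconds); values that
     * do not parse into 1..UINT32_MAX are silently ignored.
     */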

    {
        th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
        rb_define_const(cThGroup, "Default", th->thgroup);
    }

    recursive_key = rb_intern_const("__recursive_key__");

    /* init thread core */
    {
        /* main thread setting */
        {
            /* acquire global vm lock */
#ifdef HAVE_PTHREAD_NP_H
            VM_ASSERT(TH_SCHED(th)->running == th);
#endif
            // thread_sched_to_running() should not be called because
            // it assumes blocked by thread_sched_to_waiting().
            // thread_sched_to_running(sched, th);

            th->pending_interrupt_queue = rb_ary_hidden_new(0);
            th->pending_interrupt_queue_checked = 0;
            th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
        }
    }

    rb_thread_create_timer_thread();

    Init_thread_sync();

    // TODO: Suppress unused function warning for now
    // if (0) rb_thread_sched_destroy(NULL);
}

int
ruby_native_thread_p(void)
{
    rb_thread_t *th = ruby_thread_from_native();

    return th != 0;
}

#ifdef NON_SCALAR_THREAD_ID
  #define thread_id_str(th) (NULL)
#else
  #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
#endif

static void
debug_deadlock_check(rb_ractor_t *r, VALUE msg)
{
    rb_thread_t *th = 0;
    VALUE sep = rb_str_new_cstr("\n   ");

    rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
                rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
                (void *)GET_THREAD(), (void *)r->threads.main);

    ccan_list_for_each(&r->threads.set, th, lt_node) {
        rb_str_catf(msg, "* %+"PRIsVALUE"\n   rb_thread_t:%p "
                    "native:%p int:%u",
                    th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);

        if (th->locking_mutex) {
            rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
            rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
                        (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
        }

        {
            struct rb_waiting_list *list = th->join_list;
            while (list) {
                rb_str_catf(msg, "\n    depended by: rb_thread_id:%p", (void *)list->thread);
                list = list->next;
            }
        }
        rb_str_catf(msg, "\n   ");
        rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, 0, 0), sep));
        rb_str_catf(msg, "\n");
    }
}

static void
rb_check_deadlock(rb_ractor_t *r)
{
    if (GET_THREAD()->vm->thread_ignore_deadlock) return;

#ifdef RUBY_THREAD_PTHREAD_H
    if (r->threads.sched.readyq_cnt > 0) return;
#endif

    int sleeper_num = rb_ractor_sleeper_thread_num(r);
    int ltnum = rb_ractor_living_thread_num(r);

    if (ltnum > sleeper_num) return;
    if (ltnum < sleeper_num) rb_bug("sleeper count must not exceed the number of living threads");

    int found = 0;
    rb_thread_t *th = NULL;

    ccan_list_for_each(&r->threads.set, th, lt_node) {
        if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
            found = 1;
        }
        else if (th->locking_mutex) {
            rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
            if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
                found = 1;
            }
        }
        if (found)
            break;
    }

    if (!found) {
        VALUE argv[2];
        argv[0] = rb_eFatal;
        argv[1] = rb_str_new2("No live threads left. Deadlock?");
        debug_deadlock_check(r, argv[1]);
        rb_ractor_sleeper_threads_dec(GET_RACTOR());
        rb_threadptr_raise(r->threads.main, 2, argv);
    }
}

// Used for VM memsize reporting. Returns the size of a list of waiting_fd
// structs. Defined here because the struct definition lives here as well.
size_t
rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds)
{
    struct waiting_fd *waitfd = 0;
    size_t size = 0;

    ccan_list_for_each(waiting_fds, waitfd, wfd_node) {
        size += sizeof(struct waiting_fd);
    }

    return size;
}

static void
update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
{
    const rb_control_frame_t *cfp = GET_EC()->cfp;
    VALUE coverage = rb_iseq_coverage(cfp->iseq);
    if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
        VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
        if (lines) {
            long line = rb_sourceline() - 1;
            VM_ASSERT(line >= 0);
            long count;
            VALUE num;
            void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
            if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
                rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
                rb_ary_push(lines, LONG2FIX(line + 1));
                return;
            }
            if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
                return;
            }
            num = RARRAY_AREF(lines, line);
            if (!FIXNUM_P(num)) return;
            count = FIX2LONG(num) + 1;
            if (POSFIXABLE(count)) {
                RARRAY_ASET(lines, line, LONG2FIX(count));
            }
        }
    }
}

static void
update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
{
    const rb_control_frame_t *cfp = GET_EC()->cfp;
    VALUE coverage = rb_iseq_coverage(cfp->iseq);
    if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
        VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
        if (branches) {
            long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
            long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
            VALUE counters = RARRAY_AREF(branches, 1);
            VALUE num = RARRAY_AREF(counters, idx);
            count = FIX2LONG(num) + 1;
            if (POSFIXABLE(count)) {
                RARRAY_ASET(counters, idx, LONG2FIX(count));
            }
        }
    }
}

const rb_method_entry_t *
rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
{
    VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;

    if (!me->def) return NULL; // negative cme

  retry:
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ: {
        const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
        rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
        path = rb_iseq_path(iseq);
        beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
        beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
        end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
        end_pos_column = INT2FIX(loc->code_location.end_pos.column);
        break;
      }
      case VM_METHOD_TYPE_BMETHOD: {
        const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
        if (iseq) {
            rb_iseq_location_t *loc;
            rb_iseq_check(iseq);
            path = rb_iseq_path(iseq);
            loc = &ISEQ_BODY(iseq)->location;
            beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
            beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
            end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
            end_pos_column = INT2FIX(loc->code_location.end_pos.column);
            break;
        }
        return NULL;
      }
      case VM_METHOD_TYPE_ALIAS:
        me = me->def->body.alias.original_me;
        goto retry;
      case VM_METHOD_TYPE_REFINED:
        me = me->def->body.refined.orig_me;
        if (!me) return NULL;
        goto retry;
      default:
        return NULL;
    }

    /* found */
    if (RB_TYPE_P(path, T_ARRAY)) {
        path = rb_ary_entry(path, 1);
        if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
    }
    if (resolved_location) {
        resolved_location[0] = path;
        resolved_location[1] = beg_pos_lineno;
        resolved_location[2] = beg_pos_column;
        resolved_location[3] = end_pos_lineno;
        resolved_location[4] = end_pos_column;
    }
    return me;
}

static void
update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
{
    const rb_control_frame_t *cfp = GET_EC()->cfp;
    const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
    const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
    VALUE rcount;
    long count;

    me = rb_resolve_me_location(me, 0);
    if (!me) return;

    rcount = rb_hash_aref(me2counter, (VALUE) me);
    count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
    if (POSFIXABLE(count)) {
        rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
    }
}

VALUE
rb_get_coverages(void)
{
    return GET_VM()->coverages;
}

int
rb_get_coverage_mode(void)
{
    return GET_VM()->coverage_mode;
}

void
rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
{
    GET_VM()->coverages = coverages;
    GET_VM()->me2counter = me2counter;
    GET_VM()->coverage_mode = mode;
}
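
/*
 * A sketch of how the coverage machinery is driven (the real Ruby-level
 * entry point lives in the coverage extension; the names used here are
 * from this file):
 *
 *   rb_set_coverages(rb_hash_new(), COVERAGE_TARGET_LINES, Qnil);
 *   rb_resume_coverages();    // installs the event hooks below
 *   // ... run Ruby code; the per-iseq arrays built by
 *   // rb_default_coverage() accumulate hit counts ...
 *   rb_suspend_coverages();
 */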

void
rb_resume_coverages(void)
{
    int mode = GET_VM()->coverage_mode;
    VALUE me2counter = GET_VM()->me2counter;
    rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
    if (mode & COVERAGE_TARGET_BRANCHES) {
        rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
    }
    if (mode & COVERAGE_TARGET_METHODS) {
        rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
    }
}

void
rb_suspend_coverages(void)
{
    rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
    if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
        rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
    }
    if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
        rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
    }
}

/* Make coverage arrays empty so old covered files are no longer tracked. */
void
rb_reset_coverages(void)
{
    rb_clear_coverages();
    rb_iseq_remove_coverage_all();
    GET_VM()->coverages = Qfalse;
}

VALUE
rb_default_coverage(int n)
{
    VALUE coverage = rb_ary_hidden_new_fill(3);
    VALUE lines = Qfalse, branches = Qfalse;
    int mode = GET_VM()->coverage_mode;

    if (mode & COVERAGE_TARGET_LINES) {
        lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
    }
    RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);

    if (mode & COVERAGE_TARGET_BRANCHES) {
        branches = rb_ary_hidden_new_fill(2);
        /* internal data structures for branch coverage:
         *
         * { branch base node =>
         *     [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
         *       branch target id =>
         *         [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
         *       ...
         *     }],
         *   ...
         * }
         *
         * Example:
         * { NODE_CASE =>
         *     [1, 0, 4, 3, {
         *       NODE_WHEN => [2, 8, 2, 9, 0],
         *       NODE_WHEN => [3, 8, 3, 9, 1],
         *       ...
         *     }],
         *   ...
         * }
         */
        VALUE structure = rb_hash_new();
        rb_obj_hide(structure);
        RARRAY_ASET(branches, 0, structure);
        /* branch execution counters */
        RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
    }
    RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);

    return coverage;
}

static VALUE
uninterruptible_exit(VALUE v)
{
    rb_thread_t *cur_th = GET_THREAD();
    rb_ary_pop(cur_th->pending_interrupt_mask_stack);

    cur_th->pending_interrupt_queue_checked = 0;
    if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
        RUBY_VM_SET_INTERRUPT(cur_th->ec);
    }
    return Qnil;
}

VALUE
rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
{
    VALUE interrupt_mask = rb_ident_hash_new();
    rb_thread_t *cur_th = GET_THREAD();

    rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
    OBJ_FREEZE(interrupt_mask);
    rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);

    VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);

    RUBY_VM_CHECK_INTS(cur_th->ec);
    return ret;
}
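
/*
 * Usage sketch (illustrative): defer Thread#raise/Thread#kill style
 * interrupts while a C-level critical section runs; pending interrupts
 * are re-checked on the way out:
 *
 *   static VALUE
 *   critical_body(VALUE data)      // hypothetical callback
 *   {
 *       // ... work that must not be interrupted ...
 *       return Qnil;
 *   }
 *
 *   // rb_uninterruptible(critical_body, (VALUE)0);
 */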

static void
thread_specific_storage_alloc(rb_thread_t *th)
{
    VM_ASSERT(th->specific_storage == NULL);

    if (UNLIKELY(specific_key_count > 0)) {
        th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
    }
}

rb_internal_thread_specific_key_t
rb_internal_thread_specific_key_create(void)
{
    rb_vm_t *vm = GET_VM();

    if (specific_key_count == 0 && vm->ractor.cnt > 1) {
        rb_raise(rb_eThreadError, "The first rb_internal_thread_specific_key_create() is called with multiple ractors");
    }
    else if (specific_key_count > RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
        rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() is called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
    }
    else {
        rb_internal_thread_specific_key_t key = specific_key_count++;

        if (key == 0) {
            // allocate
            rb_ractor_t *cr = GET_RACTOR();
            rb_thread_t *th;

            ccan_list_for_each(&cr->threads.set, th, lt_node) {
                thread_specific_storage_alloc(th);
            }
        }
        return key;
    }
}

// async and native thread safe.
void *
rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
{
    rb_thread_t *th = DATA_PTR(thread_val);

    VM_ASSERT(rb_thread_ptr(thread_val) == th);
    VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
    VM_ASSERT(th->specific_storage);

    return th->specific_storage[key];
}

// async and native thread safe.
void
rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
{
    rb_thread_t *th = DATA_PTR(thread_val);

    VM_ASSERT(rb_thread_ptr(thread_val) == th);
    VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
    VM_ASSERT(th->specific_storage);

    th->specific_storage[key] = data;
}
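
/*
 * Example (sketch): how a C extension might keep a per-thread pointer
 * with this API (my_state is a hypothetical type):
 *
 *   static rb_internal_thread_specific_key_t key;   // create once, early
 *   key = rb_internal_thread_specific_key_create();
 *
 *   rb_internal_thread_specific_set(thread, key, my_state);
 *   struct my_state *st = rb_internal_thread_specific_get(thread, key);
 */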
5983
5984// interrupt_exec
5987 struct ccan_list_node node;
5988
5989 rb_interrupt_exec_func_t *func;
5990 void *data;
5991 enum rb_interrupt_exec_flag flags;
5992};
5993
5994void
5995rb_threadptr_interrupt_exec_task_mark(rb_thread_t *th)
5996{
5997 struct rb_interrupt_exec_task *task;
5998
5999 ccan_list_for_each(&th->interrupt_exec_tasks, task, node) {
6000 if (task->flags & rb_interrupt_exec_flag_value_data) {
6001 rb_gc_mark((VALUE)task->data);
6002 }
6003 }
6004}
6005
6006// native thread safe
6007// th should be available
6008void
6009rb_threadptr_interrupt_exec(rb_thread_t *th, rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6010{
6011 // should not use ALLOC
6013 *task = (struct rb_interrupt_exec_task) {
6014 .flags = flags,
6015 .func = func,
6016 .data = data,
6017 };
6018
6019 rb_native_mutex_lock(&th->interrupt_lock);
6020 {
6021 ccan_list_add_tail(&th->interrupt_exec_tasks, &task->node);
6022 threadptr_interrupt_locked(th, true);
6023 }
6024 rb_native_mutex_unlock(&th->interrupt_lock);
6025}
6026
6027static void
6028threadptr_interrupt_exec_exec(rb_thread_t *th)
6029{
6030 while (1) {
6031 struct rb_interrupt_exec_task *task;
6032
6033 rb_native_mutex_lock(&th->interrupt_lock);
6034 {
6035 task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node);
6036 }
6037 rb_native_mutex_unlock(&th->interrupt_lock);
6038
6039 if (task) {
6040 (*task->func)(task->data);
6041 ruby_xfree(task);
6042 }
6043 else {
6044 break;
6045 }
6046 }
6047}
6048
6049static void
6050threadptr_interrupt_exec_cleanup(rb_thread_t *th)
6051{
6052 rb_native_mutex_lock(&th->interrupt_lock);
6053 {
6054 struct rb_interrupt_exec_task *task;
6055
6056 while ((task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node)) != NULL) {
6057 ruby_xfree(task);
6058 }
6059 }
6060 rb_native_mutex_unlock(&th->interrupt_lock);
6061}
6064 rb_interrupt_exec_func_t *func;
6065 void *data;
6066};
6067
6068static VALUE
6069interrupt_ractor_new_thread_func(void *data)
6070{
6072 ruby_xfree(data);
6073
6074 d.func(d.data);
6075 return Qnil;
6076}
6077
6078static VALUE
6079interrupt_ractor_func(void *data)
6080{
6081 rb_thread_create(interrupt_ractor_new_thread_func, data);
6082 return Qnil;
6083}
6084
6085// native thread safe
6086// func/data should be native thread safe
6087void
6088rb_ractor_interrupt_exec(struct rb_ractor_struct *target_r,
6089 rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6090{
6091    struct interrupt_ractor_new_thread_data *d = ALLOC(struct interrupt_ractor_new_thread_data);
6092
6093 d->func = func;
6094 d->data = data;
6095 rb_thread_t *main_th = target_r->threads.main;
6096 rb_threadptr_interrupt_exec(main_th, interrupt_ractor_func, d, flags);
6097
6098 // TODO MEMO: we can create a new thread in a ractor, but not sure how to do that now.
6099}
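
/* [Editorial sketch, not part of thread.c] End to end, the ractor variant
 * above interrupts the main thread of target_r; the queued trampoline then
 * spawns a fresh Ruby thread inside that ractor (interrupt_ractor_func and
 * interrupt_ractor_new_thread_func above) and runs func there. A
 * hypothetical in-VM caller, passing no flags:
 *
 *   static VALUE
 *   say_hi(void *data)
 *   {
 *       rb_p(rb_str_new_cstr("hi from inside the target ractor"));
 *       return Qnil;
 *   }
 *
 *   // with a rb_ractor_t *r in hand:
 *   rb_ractor_interrupt_exec(r, say_hi, NULL, (enum rb_interrupt_exec_flag)0);
 */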