Ruby 3.2.5p208 (2024-07-26 revision 31d0f1a2e7dbfb60731d1f05b868e1d578cda493)
thread.c
1/**********************************************************************
2
3 thread.c -
4
5 $Author$
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9**********************************************************************/
10
11/*
12 YARV Thread Design
13
14 model 1: Userlevel Thread
15 Same as the traditional ruby thread.
16
17 model 2: Native Thread with Global VM lock
18 Uses pthreads (or Windows threads); Ruby threads run concurrently.
19
20 model 3: Native Thread with fine grain lock
21 Uses pthreads; Ruby threads run concurrently or in parallel.
22
23 model 4: M:N User:Native threads with Global VM lock
24 Combination of models 1 and 2.
25
26 model 5: M:N User:Native thread with fine grain lock
27 Combination of models 1 and 3.
28
29------------------------------------------------------------------------
30
31 model 2:
32 Only the thread that holds the GVL (Global VM Lock, or Giant VM Lock)
33 can run. On thread scheduling, the running thread releases the GVL. If
34 the running thread attempts a blocking operation, it must release the
35 GVL so that another thread can continue. After the blocking operation,
36 the thread must check for interrupts (RUBY_VM_CHECK_INTS).
37
38 Every VM can run in parallel.
39
40 Ruby threads are scheduled by the OS thread scheduler.
41
42------------------------------------------------------------------------
43
44 model 3:
45 All threads run concurrently or in parallel. To access a shared object,
46 exclusive access control is needed: for example, accessing a String
47 or Array object requires taking a fine-grained lock every time.
48 */
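/*
 * To make the model 2 pattern concrete, here is a minimal sketch of how a C
 * extension hands a blocking call to the GVL machinery below
 * (rb_thread_call_without_gvl() is defined later in this file; the names
 * blocking_read() and struct read_args are hypothetical):
 *
 *   struct read_args { int fd; char *buf; size_t len; ssize_t result; };
 *
 *   static void *
 *   blocking_read(void *ptr)
 *   {
 *       struct read_args *a = ptr;
 *       a->result = read(a->fd, a->buf, a->len); // runs without the GVL
 *       return NULL;
 *   }
 *
 *   struct read_args args = { .fd = fd, .buf = buf, .len = len };
 *   // Other Ruby threads may run in parallel while read(2) blocks:
 *   rb_thread_call_without_gvl(blocking_read, &args, RUBY_UBF_IO, NULL);
 */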
49
50
51/*
52 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
53 * 2.15 or later with _FORTIFY_SOURCE > 0.
54 * However, the check is wrong. Even though Linux's select(2)
55 * supports large fd sizes (>FD_SETSIZE), it wrongly assumes fd is always
56 * less than FD_SETSIZE (i.e. 1024). So when HAVE_RB_FD_INIT is enabled,
57 * it doesn't work correctly and makes the program abort. Therefore we need
58 * to disable _FORTIFY_SOURCE until glibc fixes it.
59 */
60#undef _FORTIFY_SOURCE
61#undef __USE_FORTIFY_LEVEL
62#define __USE_FORTIFY_LEVEL 0
63
64/* for model 2 */
65
66#include "ruby/internal/config.h"
67
68#ifdef __linux__
69// Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
70# include <alloca.h>
71#endif
72
73#define TH_SCHED(th) (&(th)->ractor->threads.sched)
74
75#include "eval_intern.h"
76#include "gc.h"
77#include "hrtime.h"
78#include "internal.h"
79#include "internal/class.h"
80#include "internal/cont.h"
81#include "internal/error.h"
82#include "internal/hash.h"
83#include "internal/io.h"
84#include "internal/object.h"
85#include "internal/proc.h"
87#include "internal/signal.h"
88#include "internal/thread.h"
89#include "internal/time.h"
90#include "internal/warnings.h"
91#include "iseq.h"
92#include "mjit.h"
93#include "ruby/debug.h"
94#include "ruby/io.h"
95#include "ruby/thread.h"
96#include "ruby/thread_native.h"
97#include "timev.h"
98#include "vm_core.h"
99#include "ractor_core.h"
100#include "vm_debug.h"
101#include "vm_sync.h"
102
103#if USE_MJIT && defined(HAVE_SYS_WAIT_H)
104#include <sys/wait.h>
105#endif
106
107#ifndef USE_NATIVE_THREAD_PRIORITY
108#define USE_NATIVE_THREAD_PRIORITY 0
109#define RUBY_THREAD_PRIORITY_MAX 3
110#define RUBY_THREAD_PRIORITY_MIN -3
111#endif
112
113static VALUE rb_cThreadShield;
114
115static VALUE sym_immediate;
116static VALUE sym_on_blocking;
117static VALUE sym_never;
118
119enum SLEEP_FLAGS {
120 SLEEP_DEADLOCKABLE = 0x1,
121 SLEEP_SPURIOUS_CHECK = 0x2
122};
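/*
 * SLEEP_DEADLOCKABLE: the sleeping thread is counted as a sleeper for
 * ractor-wide deadlock detection (see rb_check_deadlock()).
 * SLEEP_SPURIOUS_CHECK: after a wakeup, re-check and keep sleeping unless
 * the thread's status actually changed (guards against spurious wakeups).
 */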
123
124#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
125#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
126
127static inline VALUE
128rb_thread_local_storage(VALUE thread)
129{
130 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
131 rb_ivar_set(thread, idLocals, rb_hash_new());
132 RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
133 }
134 return rb_ivar_get(thread, idLocals);
135}
136
137static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
138static void sleep_forever(rb_thread_t *th, unsigned int fl);
139static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
140static int rb_threadptr_dead(rb_thread_t *th);
141static void rb_check_deadlock(rb_ractor_t *r);
142static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
143static const char *thread_status_name(rb_thread_t *th, int detail);
144static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
145NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
146static int consume_communication_pipe(int fd);
147static int check_signals_nogvl(rb_thread_t *, int sigwait_fd);
148void rb_sigwait_fd_migrate(rb_vm_t *); /* process.c */
149
150#define eKillSignal INT2FIX(0)
151#define eTerminateSignal INT2FIX(1)
152static volatile int system_working = 1;
153
154struct waiting_fd {
153
155 struct ccan_list_node wfd_node; /* <=> vm.waiting_fds */
156 rb_thread_t *th;
157 int fd;
158};
159
160/********************************************************************************/
161
162#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
163
164struct rb_blocking_region_buffer {
165 enum rb_thread_status prev_status;
166};
167
168static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
169static void unblock_function_clear(rb_thread_t *th);
170
171static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
172 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
173static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
174
175#define THREAD_BLOCKING_BEGIN(th) do { \
176 struct rb_thread_sched * const sched = TH_SCHED(th); \
177 RB_GC_SAVE_MACHINE_CONTEXT(th); \
178 thread_sched_to_waiting(sched);
179
180#define THREAD_BLOCKING_END(th) \
181 thread_sched_to_running(sched, th); \
182 rb_ractor_thread_switch(th->ractor, th); \
183} while(0)
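/*
 * THREAD_BLOCKING_BEGIN/END bracket a native-level wait without installing
 * an unblock function (contrast with BLOCKING_REGION below). A minimal
 * usage sketch:
 *
 *   THREAD_BLOCKING_BEGIN(th);
 *   {
 *       native_sleep(th, timeout); // scheduler lock/GVL is released here
 *   }
 *   THREAD_BLOCKING_END(th);
 */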
184
185#ifdef __GNUC__
186#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
187#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
188#else
189#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
190#endif
191#else
192#define only_if_constant(expr, notconst) notconst
193#endif
194#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
195 struct rb_blocking_region_buffer __region; \
196 if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
197 /* always return true unless fail_if_interrupted */ \
198 !only_if_constant(fail_if_interrupted, TRUE)) { \
199 /* Important that this is inlined into the macro, and not part of \
200 * blocking_region_begin - see bug #20493 */ \
201 RB_GC_SAVE_MACHINE_CONTEXT(th); \
202 thread_sched_to_waiting(TH_SCHED(th)); \
203 exec; \
204 blocking_region_end(th, &__region); \
205 }; \
206} while(0)
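/*
 * A sketch of how BLOCKING_REGION is used (rb_nogvl() further down in this
 * file is a real caller of exactly this shape):
 *
 *   BLOCKING_REGION(th, {
 *       val = func(data1);     // executed with the GVL released
 *       saved_errno = errno;
 *   }, ubf, data2, FALSE);
 */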
207
208/*
209 * returns true if this thread was spuriously interrupted, false otherwise
210 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
211 */
212#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
213static inline int
214vm_check_ints_blocking(rb_execution_context_t *ec)
215{
216 rb_thread_t *th = rb_ec_thread_ptr(ec);
217
218 if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
219 if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
220 }
221 else {
222 th->pending_interrupt_queue_checked = 0;
223 RUBY_VM_SET_INTERRUPT(ec);
224 }
225 return rb_threadptr_execute_interrupts(th, 1);
226}
227
228int
229rb_vm_check_ints_blocking(rb_execution_context_t *ec)
230{
231 return vm_check_ints_blocking(ec);
232}
233
234/*
235 * poll() is supported by many OSes, but so far Linux is the only
236 * one we know of that supports using poll() in all places select()
237 * would work.
238 */
239#if defined(HAVE_POLL)
240# if defined(__linux__)
241# define USE_POLL
242# endif
243# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
244# define USE_POLL
245 /* FreeBSD does not set POLLOUT when POLLHUP happens */
246# define POLLERR_SET (POLLHUP | POLLERR)
247# endif
248#endif
249
250static void
251timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
252 const struct timeval *timeout)
253{
254 if (timeout) {
255 *rel = rb_timeval2hrtime(timeout);
256 *end = rb_hrtime_add(rb_hrtime_now(), *rel);
257 *to = rel;
258 }
259 else {
260 *to = 0;
261 }
262}
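/*
 * A usage sketch for timeout_prepare(): it turns an optional struct timeval
 * into the (relative, absolute-deadline) pair consumed by the sleep loops
 * below; a NULL timeout yields *to == NULL, meaning "wait forever".
 *
 *   rb_hrtime_t rel, end, *to;
 *   timeout_prepare(&to, &rel, &end, timeout); // timeout may be NULL
 *   // ... later, hrtime_update_expire(&rel, end) shrinks rel as time passes
 */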
263
264MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
265void ruby_sigchld_handler(rb_vm_t *); /* signal.c */
266
267static void
268ubf_sigwait(void *ignore)
269{
270 rb_thread_wakeup_timer_thread(0);
271}
272
273#include THREAD_IMPL_SRC
274
275/*
276 * TODO: somebody with win32 knowledge should be able to get rid of
277 * timer-thread by busy-waiting on signals. And it should be possible
278 * to make the GVL in thread_pthread.c be platform-independent.
279 */
280#ifndef BUSY_WAIT_SIGNALS
281# define BUSY_WAIT_SIGNALS (0)
282#endif
283
284#ifndef USE_EVENTFD
285# define USE_EVENTFD (0)
286#endif
287
288#include "thread_sync.c"
289
290void
291rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
292{
293 rb_native_mutex_initialize(lock);
294}
295
296void
297rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
298{
299 rb_native_mutex_destroy(lock);
300}
301
302void
303rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
304{
305 rb_native_mutex_lock(lock);
306}
307
308void
309rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
310{
311 rb_native_mutex_unlock(lock);
312}
313
314static int
315unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
316{
317 do {
318 if (fail_if_interrupted) {
319 if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
320 return FALSE;
321 }
322 }
323 else {
324 RUBY_VM_CHECK_INTS(th->ec);
325 }
326
327 rb_native_mutex_lock(&th->interrupt_lock);
328 } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
329 (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));
330
331 VM_ASSERT(th->unblock.func == NULL);
332
333 th->unblock.func = func;
334 th->unblock.arg = arg;
335 rb_native_mutex_unlock(&th->interrupt_lock);
336
337 return TRUE;
338}
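/*
 * Note the loop above: if an interrupt arrives between the interrupt check
 * and taking interrupt_lock, the lock is dropped and the check is retried,
 * so an unblock function is never installed over a pending interrupt.
 */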
339
340static void
341unblock_function_clear(rb_thread_t *th)
342{
343 rb_native_mutex_lock(&th->interrupt_lock);
344 th->unblock.func = 0;
345 rb_native_mutex_unlock(&th->interrupt_lock);
346}
347
348static void
349rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
350{
351 rb_native_mutex_lock(&th->interrupt_lock);
352
353 if (trap) {
354 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
355 }
356 else {
357 RUBY_VM_SET_INTERRUPT(th->ec);
358 }
359 if (th->unblock.func != NULL) {
360 (th->unblock.func)(th->unblock.arg);
361 }
362 else {
363 /* none */
364 }
365 rb_native_mutex_unlock(&th->interrupt_lock);
366}
367
368void
369rb_threadptr_interrupt(rb_thread_t *th)
370{
371 rb_threadptr_interrupt_common(th, 0);
372}
373
374static void
375threadptr_trap_interrupt(rb_thread_t *th)
376{
377 rb_threadptr_interrupt_common(th, 1);
378}
379
380static void
381terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
382{
383 rb_thread_t *th = 0;
384
385 ccan_list_for_each(&r->threads.set, th, lt_node) {
386 if (th != main_thread) {
387 RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
388
389 rb_threadptr_pending_interrupt_enque(th, eTerminateSignal);
390 rb_threadptr_interrupt(th);
391
392 RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
393 }
394 else {
395 RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
396 }
397 }
398}
399
400static void
401rb_threadptr_join_list_wakeup(rb_thread_t *thread)
402{
403 while (thread->join_list) {
404 struct rb_waiting_list *join_list = thread->join_list;
405
406 // Consume the entry from the join list:
407 thread->join_list = join_list->next;
408
409 rb_thread_t *target_thread = join_list->thread;
410
411 if (target_thread->scheduler != Qnil && join_list->fiber) {
412 rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
413 }
414 else {
415 rb_threadptr_interrupt(target_thread);
416
417 switch (target_thread->status) {
418 case THREAD_STOPPED:
419 case THREAD_STOPPED_FOREVER:
420 target_thread->status = THREAD_RUNNABLE;
421 default:
422 break;
423 }
424 }
425 }
426}
427
428void
429rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
430{
431 while (th->keeping_mutexes) {
432 rb_mutex_t *mutex = th->keeping_mutexes;
433 th->keeping_mutexes = mutex->next_mutex;
434
435 /* rb_warn("mutex #<%p> remains to be locked by terminated thread", (void *)mutexes); */
436
437 const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
438 if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
439 }
440}
441
442void
443rb_thread_terminate_all(rb_thread_t *th)
444{
445 rb_ractor_t *cr = th->ractor;
446 rb_execution_context_t * volatile ec = th->ec;
447 volatile int sleeping = 0;
448
449 if (cr->threads.main != th) {
450 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
451 (void *)cr->threads.main, (void *)th);
452 }
453
454 /* unlock all locking mutexes */
455 rb_threadptr_unlock_all_locking_mutexes(th);
456
457 EC_PUSH_TAG(ec);
458 if (EC_EXEC_TAG() == TAG_NONE) {
459 retry:
460 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
461
462 terminate_all(cr, th);
463
464 while (rb_ractor_living_thread_num(cr) > 1) {
465 rb_hrtime_t rel = RB_HRTIME_PER_SEC;
466 /*
467 * The thread-exit routine in thread_start_func_2 notifies
468 * us when the last sub-thread exits.
469 */
470 sleeping = 1;
471 native_sleep(th, &rel);
472 RUBY_VM_CHECK_INTS_BLOCKING(ec);
473 sleeping = 0;
474 }
475 }
476 else {
477 /*
478 * When an exception is caught (e.g. Ctrl+C), broadcast the
479 * kill request again to ensure that all threads are killed, even
480 * if they are blocked on sleep, mutex, etc.
481 */
482 if (sleeping) {
483 sleeping = 0;
484 goto retry;
485 }
486 }
487 EC_POP_TAG();
488}
489
490void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
491
492static void
493thread_cleanup_func_before_exec(void *th_ptr)
494{
495 rb_thread_t *th = th_ptr;
496 th->status = THREAD_KILLED;
497
498 // The thread stack doesn't exist in the forked process:
499 th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
500
501 rb_threadptr_root_fiber_terminate(th);
502}
503
504static void
505thread_cleanup_func(void *th_ptr, int atfork)
506{
507 rb_thread_t *th = th_ptr;
508
509 th->locking_mutex = Qfalse;
510 thread_cleanup_func_before_exec(th_ptr);
511
512 /*
513 * Unfortunately, we can't release native threading resources at fork,
514 * because libc may be in an unstable locking state; touching
515 * a threading resource could therefore cause a deadlock.
516 *
517 * FIXME: Skipping native_mutex_destroy(pthread_mutex_destroy) is safe
518 * with NPTL, but native_thread_destroy calls pthread_cond_destroy,
519 * which calls free(3), so there is a small memory leak at fork here.
520 */
521 if (atfork)
522 return;
523
524 rb_native_mutex_destroy(&th->interrupt_lock);
525 native_thread_destroy(th);
526}
527
528static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
529static VALUE rb_thread_to_s(VALUE thread);
530
531void
532ruby_thread_init_stack(rb_thread_t *th)
533{
534 native_thread_init_stack(th);
535}
536
537const VALUE *
538rb_vm_proc_local_ep(VALUE proc)
539{
540 const VALUE *ep = vm_proc_ep(proc);
541
542 if (ep) {
543 return rb_vm_ep_local_ep(ep);
544 }
545 else {
546 return NULL;
547 }
548}
549
550// for ractor, defined in vm.c
551VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
552 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
553
554static VALUE
555thread_do_start_proc(rb_thread_t *th)
556{
557 VALUE args = th->invoke_arg.proc.args;
558 const VALUE *args_ptr;
559 int args_len;
560 VALUE procval = th->invoke_arg.proc.proc;
561 rb_proc_t *proc;
562 GetProcPtr(procval, proc);
563
564 th->ec->errinfo = Qnil;
565 th->ec->root_lep = rb_vm_proc_local_ep(procval);
566 th->ec->root_svar = Qfalse;
567
568 vm_check_ints_blocking(th->ec);
569
570 if (th->invoke_type == thread_invoke_type_ractor_proc) {
571 VALUE self = rb_ractor_self(th->ractor);
572 VM_ASSERT(FIXNUM_P(args));
573 args_len = FIX2INT(args);
574 args_ptr = ALLOCA_N(VALUE, args_len);
575 rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
576 vm_check_ints_blocking(th->ec);
577
578 return rb_vm_invoke_proc_with_self(
579 th->ec, proc, self,
580 args_len, args_ptr,
581 th->invoke_arg.proc.kw_splat,
582 VM_BLOCK_HANDLER_NONE
583 );
584 }
585 else {
586 args_len = RARRAY_LENINT(args);
587 if (args_len < 8) {
588 /* free proc.args if the length is small enough */
589 args_ptr = ALLOCA_N(VALUE, args_len);
590 MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR_TRANSIENT(args), VALUE, args_len);
591 th->invoke_arg.proc.args = Qnil;
592 }
593 else {
594 args_ptr = RARRAY_CONST_PTR(args);
595 }
596
597 vm_check_ints_blocking(th->ec);
598
599 return rb_vm_invoke_proc(
600 th->ec, proc,
601 args_len, args_ptr,
602 th->invoke_arg.proc.kw_splat,
603 VM_BLOCK_HANDLER_NONE
604 );
605 }
606}
607
608static VALUE
609thread_do_start(rb_thread_t *th)
610{
611 native_set_thread_name(th);
612 VALUE result = Qundef;
613
614 switch (th->invoke_type) {
615 case thread_invoke_type_proc:
616 result = thread_do_start_proc(th);
617 break;
618
619 case thread_invoke_type_ractor_proc:
620 result = thread_do_start_proc(th);
621 rb_ractor_atexit(th->ec, result);
622 break;
623
624 case thread_invoke_type_func:
625 result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
626 break;
627
628 case thread_invoke_type_none:
629 rb_bug("unreachable");
630 }
631
632 return result;
633}
634
635void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
636#define thread_sched_to_dead thread_sched_to_waiting
637
638static int
639thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
640{
641 STACK_GROW_DIR_DETECTION;
642 enum ruby_tag_type state;
643 VALUE errinfo = Qnil;
644 size_t size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
645 rb_thread_t *ractor_main_th = th->ractor->threads.main;
646 VALUE * vm_stack = NULL;
647
648 VM_ASSERT(th != th->vm->ractor.main_thread);
649 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
650
651 // setup native thread
652 thread_sched_to_running(TH_SCHED(th), th);
653 ruby_thread_set_native(th);
654
655 RUBY_DEBUG_LOG("got lock. th:%u", rb_th_serial(th));
656
657 // setup ractor
658 if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
659 RB_VM_LOCK();
660 {
661 rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
662 rb_ractor_t *r = th->ractor;
663 r->r_stdin = rb_io_prep_stdin();
664 r->r_stdout = rb_io_prep_stdout();
665 r->r_stderr = rb_io_prep_stderr();
666 }
667 RB_VM_UNLOCK();
668 }
669
670 // This assertion does not hold on win32; check it later.
671 // VM_ASSERT((size * sizeof(VALUE)) <= th->ec->machine.stack_maxsize);
672
673 // setup VM and machine stack
674 vm_stack = alloca(size * sizeof(VALUE));
675 VM_ASSERT(vm_stack);
676
677 rb_ec_initialize_vm_stack(th->ec, vm_stack, size);
678 th->ec->machine.stack_start = STACK_DIR_UPPER(vm_stack + size, vm_stack);
679 th->ec->machine.stack_maxsize -= size * sizeof(VALUE);
680
681 // Ensure that we are not joinable.
682 VM_ASSERT(UNDEF_P(th->value));
683
684 int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
685 VALUE result = Qundef;
686
687 EC_PUSH_TAG(th->ec);
688
689 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
690 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
691
692 SAVE_ROOT_JMPBUF(th, result = thread_do_start(th));
693 }
694
695 if (!fiber_scheduler_closed) {
696 fiber_scheduler_closed = 1;
697 rb_fiber_scheduler_set(Qnil);
698 }
699
700 if (!event_thread_end_hooked) {
701 event_thread_end_hooked = 1;
702 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
703 }
704
705 if (state == TAG_NONE) {
706 // This must be set AFTER doing all user-level code. At this point, the thread is effectively finished and calls to `Thread#join` will succeed.
707 th->value = result;
708 } else {
709 errinfo = th->ec->errinfo;
710
711 VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
712 if (!NIL_P(exc)) errinfo = exc;
713
714 if (state == TAG_FATAL) {
715 if (th->invoke_type == thread_invoke_type_ractor_proc) {
716 rb_ractor_atexit(th->ec, Qnil);
717 }
718 /* fatal error within this thread, need to stop whole script */
719 }
720 else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
721 /* exit on main_thread. */
722 }
723 else {
724 if (th->report_on_exception) {
725 VALUE mesg = rb_thread_to_s(th->self);
726 rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
727 rb_write_error_str(mesg);
728 rb_ec_error_print(th->ec, errinfo);
729 }
730
731 if (th->invoke_type == thread_invoke_type_ractor_proc) {
732 rb_ractor_atexit_exception(th->ec);
733 }
734
735 if (th->vm->thread_abort_on_exception ||
736 th->abort_on_exception || RTEST(ruby_debug)) {
737 /* exit on main_thread */
738 }
739 else {
740 errinfo = Qnil;
741 }
742 }
743 th->value = Qnil;
744 }
745
746 // The thread is effectively finished and can be joined.
747 VM_ASSERT(!UNDEF_P(th->value));
748
749 rb_threadptr_join_list_wakeup(th);
750 rb_threadptr_unlock_all_locking_mutexes(th);
751
752 if (th->invoke_type == thread_invoke_type_ractor_proc) {
753 rb_thread_terminate_all(th);
754 rb_ractor_teardown(th->ec);
755 }
756
757 th->status = THREAD_KILLED;
758 RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));
759
760 if (th->vm->ractor.main_thread == th) {
761 ruby_stop(0);
762 }
763
764 if (RB_TYPE_P(errinfo, T_OBJECT)) {
765 /* treat as a normal error object */
766 rb_threadptr_raise(ractor_main_th, 1, &errinfo);
767 }
768
769 EC_POP_TAG();
770
771 rb_ec_clear_current_thread_trace_func(th->ec);
772
773 /* locking_mutex must be Qfalse */
774 if (th->locking_mutex != Qfalse) {
775 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
776 (void *)th, th->locking_mutex);
777 }
778
779 if (ractor_main_th->status == THREAD_KILLED &&
780 th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
781 /* I'm the last thread; wake up the main thread waiting in rb_thread_terminate_all */
782 rb_threadptr_interrupt(ractor_main_th);
783 }
784
785 rb_check_deadlock(th->ractor);
786
787 rb_fiber_close(th->ec->fiber_ptr);
788
789 thread_cleanup_func(th, FALSE);
790 VM_ASSERT(th->ec->vm_stack == NULL);
791
792 if (th->invoke_type == thread_invoke_type_ractor_proc) {
793 // after rb_ractor_living_threads_remove()
794 // GC will happen anytime and this ractor can be collected (and destroy GVL).
795 // So gvl_release() should be before it.
796 thread_sched_to_dead(TH_SCHED(th));
797 rb_ractor_living_threads_remove(th->ractor, th);
798 }
799 else {
800 rb_ractor_living_threads_remove(th->ractor, th);
801 thread_sched_to_dead(TH_SCHED(th));
802 }
803
804 return 0;
805}
806
807struct thread_create_params {
808 enum thread_invoke_type type;
809
810 // for normal proc thread
811 VALUE args;
812 VALUE proc;
813
814 // for ractor
815 rb_ractor_t *g;
816
817 // for func
818 VALUE (*fn)(void *);
819};
820
821static VALUE
822thread_create_core(VALUE thval, struct thread_create_params *params)
823{
824 rb_execution_context_t *ec = GET_EC();
825 rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
826 int err;
827
828 if (OBJ_FROZEN(current_th->thgroup)) {
829 rb_raise(rb_eThreadError,
830 "can't start a new thread (frozen ThreadGroup)");
831 }
832
833 rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);
834
835 switch (params->type) {
836 case thread_invoke_type_proc:
837 th->invoke_type = thread_invoke_type_proc;
838 th->invoke_arg.proc.args = params->args;
839 th->invoke_arg.proc.proc = params->proc;
840 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
841 break;
842
843 case thread_invoke_type_ractor_proc:
844#if RACTOR_CHECK_MODE > 0
845 rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
846#endif
847 th->invoke_type = thread_invoke_type_ractor_proc;
848 th->ractor = params->g;
849 th->ractor->threads.main = th;
850 th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
851 th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
852 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
853 rb_ractor_send_parameters(ec, params->g, params->args);
854 break;
855
856 case thread_invoke_type_func:
857 th->invoke_type = thread_invoke_type_func;
858 th->invoke_arg.func.func = params->fn;
859 th->invoke_arg.func.arg = (void *)params->args;
860 break;
861
862 default:
863 rb_bug("unreachable");
864 }
865
866 th->priority = current_th->priority;
867 th->thgroup = current_th->thgroup;
868
869 th->pending_interrupt_queue = rb_ary_hidden_new(0);
870 th->pending_interrupt_queue_checked = 0;
871 th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
872 RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);
873
874 rb_native_mutex_initialize(&th->interrupt_lock);
875
876 RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));
877
878 rb_ractor_living_threads_insert(th->ractor, th);
879
880 /* kick thread */
881 err = native_thread_create(th);
882 if (err) {
883 th->status = THREAD_KILLED;
884 rb_ractor_living_threads_remove(th->ractor, th);
885 rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
886 }
887 return thval;
888}
889
890#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
891
892/*
893 * call-seq:
894 * Thread.new { ... } -> thread
895 * Thread.new(*args, &proc) -> thread
896 * Thread.new(*args) { |args| ... } -> thread
897 *
898 * Creates a new thread executing the given block.
899 *
900 * Any +args+ given to ::new will be passed to the block:
901 *
902 * arr = []
903 * a, b, c = 1, 2, 3
904 * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
905 * arr #=> [1, 2, 3]
906 *
907 * A ThreadError exception is raised if ::new is called without a block.
908 *
909 * If you're going to subclass Thread, be sure to call super in your
910 * +initialize+ method, otherwise a ThreadError will be raised.
911 */
912static VALUE
913thread_s_new(int argc, VALUE *argv, VALUE klass)
914{
915 rb_thread_t *th;
916 VALUE thread = rb_thread_alloc(klass);
917
918 if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
919 rb_raise(rb_eThreadError, "can't alloc thread");
920 }
921
922 rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
923 th = rb_thread_ptr(thread);
924 if (!threadptr_initialized(th)) {
925 rb_raise(rb_eThreadError, "uninitialized thread - check `%"PRIsVALUE"#initialize'",
926 klass);
927 }
928 return thread;
929}
930
931/*
932 * call-seq:
933 * Thread.start([args]*) {|args| block } -> thread
934 * Thread.fork([args]*) {|args| block } -> thread
935 *
936 * Basically the same as ::new. However, if class Thread is subclassed, then
937 * calling +start+ in that subclass will not invoke the subclass's
938 * +initialize+ method.
939 */
940
941static VALUE
942thread_start(VALUE klass, VALUE args)
943{
944 struct thread_create_params params = {
945 .type = thread_invoke_type_proc,
946 .args = args,
947 .proc = rb_block_proc(),
948 };
949 return thread_create_core(rb_thread_alloc(klass), &params);
950}
951
952static VALUE
953threadptr_invoke_proc_location(rb_thread_t *th)
954{
955 if (th->invoke_type == thread_invoke_type_proc) {
956 return rb_proc_location(th->invoke_arg.proc.proc);
957 }
958 else {
959 return Qnil;
960 }
961}
962
963/* :nodoc: */
964static VALUE
965thread_initialize(VALUE thread, VALUE args)
966{
967 rb_thread_t *th = rb_thread_ptr(thread);
968
969 if (!rb_block_given_p()) {
970 rb_raise(rb_eThreadError, "must be called with a block");
971 }
972 else if (th->invoke_type != thread_invoke_type_none) {
973 VALUE loc = threadptr_invoke_proc_location(th);
974 if (!NIL_P(loc)) {
975 rb_raise(rb_eThreadError,
976 "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
977 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
978 }
979 else {
980 rb_raise(rb_eThreadError, "already initialized thread");
981 }
982 }
983 else {
984 struct thread_create_params params = {
985 .type = thread_invoke_type_proc,
986 .args = args,
987 .proc = rb_block_proc(),
988 };
989 return thread_create_core(thread, &params);
990 }
991}
992
993VALUE
994rb_thread_create(VALUE (*fn)(void *), void *arg)
995{
996 struct thread_create_params params = {
997 .type = thread_invoke_type_func,
998 .fn = fn,
999 .args = (VALUE)arg,
1000 };
1001 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
1002}
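/*
 * A minimal sketch of using rb_thread_create() from C (the worker function
 * and the 'callable' argument are hypothetical):
 *
 *   static VALUE
 *   worker(void *arg)
 *   {
 *       // runs in the new Ruby thread, holding the GVL
 *       return rb_funcall((VALUE)arg, rb_intern("call"), 0);
 *   }
 *
 *   // 'callable' is any Ruby object responding to #call:
 *   VALUE th = rb_thread_create(worker, (void *)callable);
 *   rb_funcall(th, rb_intern("join"), 0);
 */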
1003
1004VALUE
1005rb_thread_create_ractor(rb_ractor_t *g, VALUE args, VALUE proc)
1006{
1007 struct thread_create_params params = {
1008 .type = thread_invoke_type_ractor_proc,
1009 .g = g,
1010 .args = args,
1011 .proc = proc,
1012 };
1013 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
1014}
1015
1017struct join_arg {
1018 struct rb_waiting_list *waiter;
1019 rb_thread_t *target;
1020 VALUE timeout;
1021 rb_hrtime_t *limit;
1022};
1023
1024static VALUE
1025remove_from_join_list(VALUE arg)
1026{
1027 struct join_arg *p = (struct join_arg *)arg;
1028 rb_thread_t *target_thread = p->target;
1029
1030 if (target_thread->status != THREAD_KILLED) {
1031 struct rb_waiting_list **join_list = &target_thread->join_list;
1032
1033 while (*join_list) {
1034 if (*join_list == p->waiter) {
1035 *join_list = (*join_list)->next;
1036 break;
1037 }
1038
1039 join_list = &(*join_list)->next;
1040 }
1041 }
1042
1043 return Qnil;
1044}
1045
1046static int
1047thread_finished(rb_thread_t *th)
1048{
1049 return th->status == THREAD_KILLED || !UNDEF_P(th->value);
1050}
1051
1052static VALUE
1053thread_join_sleep(VALUE arg)
1054{
1055 struct join_arg *p = (struct join_arg *)arg;
1056 rb_thread_t *target_th = p->target, *th = p->waiter->thread;
1057 rb_hrtime_t end = 0, *limit = p->limit;
1058
1059 if (limit) {
1060 end = rb_hrtime_add(*limit, rb_hrtime_now());
1061 }
1062
1063 while (!thread_finished(target_th)) {
1064 VALUE scheduler = rb_fiber_scheduler_current();
1065
1066 if (scheduler != Qnil) {
1067 rb_fiber_scheduler_block(scheduler, target_th->self, p->timeout);
1068 // Check if the target thread is finished after blocking:
1069 if (thread_finished(target_th)) break;
1070 // Otherwise, a timeout occurred:
1071 else return Qfalse;
1072 }
1073 else if (!limit) {
1074 th->status = THREAD_STOPPED_FOREVER;
1075 rb_ractor_sleeper_threads_inc(th->ractor);
1076 rb_check_deadlock(th->ractor);
1077 native_sleep(th, 0);
1078 rb_ractor_sleeper_threads_dec(th->ractor);
1079 }
1080 else {
1081 if (hrtime_update_expire(limit, end)) {
1082 RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
1083 return Qfalse;
1084 }
1085 th->status = THREAD_STOPPED;
1086 native_sleep(th, limit);
1087 }
1088 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1089 th->status = THREAD_RUNNABLE;
1090
1091 RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1092 }
1093
1094 return Qtrue;
1095}
1096
1097static VALUE
1098thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
1099{
1100 rb_execution_context_t *ec = GET_EC();
1101 rb_thread_t *th = ec->thread_ptr;
1102 rb_fiber_t *fiber = ec->fiber_ptr;
1103
1104 if (th == target_th) {
1105 rb_raise(rb_eThreadError, "Target thread must not be current thread");
1106 }
1107
1108 if (th->ractor->threads.main == target_th) {
1109 rb_raise(rb_eThreadError, "Target thread must not be main thread");
1110 }
1111
1112 RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1113
1114 if (target_th->status != THREAD_KILLED) {
1115 struct rb_waiting_list waiter;
1116 waiter.next = target_th->join_list;
1117 waiter.thread = th;
1118 waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
1119 target_th->join_list = &waiter;
1120
1121 struct join_arg arg;
1122 arg.waiter = &waiter;
1123 arg.target = target_th;
1124 arg.timeout = timeout;
1125 arg.limit = limit;
1126
1127 if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
1128 return Qnil;
1129 }
1130 }
1131
1132 RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1133
1134 if (target_th->ec->errinfo != Qnil) {
1135 VALUE err = target_th->ec->errinfo;
1136
1137 if (FIXNUM_P(err)) {
1138 switch (err) {
1139 case INT2FIX(TAG_FATAL):
1140 RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1141
1142 /* OK. killed. */
1143 break;
1144 default:
1145 rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
1146 }
1147 }
1148 else if (THROW_DATA_P(target_th->ec->errinfo)) {
1149 rb_bug("thread_join: THROW_DATA should not reach here.");
1150 }
1151 else {
1152 /* normal exception */
1153 rb_exc_raise(err);
1154 }
1155 }
1156 return target_th->self;
1157}
1158
1159/*
1160 * call-seq:
1161 * thr.join -> thr
1162 * thr.join(limit) -> thr
1163 *
1164 * The calling thread will suspend execution and run this +thr+.
1165 *
1166 * Does not return until +thr+ exits or until the given +limit+ seconds have
1167 * passed.
1168 *
1169 * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
1170 * returned.
1171 *
1172 * Any threads not joined will be killed when the main program exits.
1173 *
1174 * If +thr+ had previously raised an exception and the ::abort_on_exception or
1175 * $DEBUG flags are not set, (so the exception has not yet been processed), it
1176 * will be processed at this time.
1177 *
1178 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
1179 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
1180 * x.join # Let thread x finish, thread a will be killed on exit.
1181 * #=> "axyz"
1182 *
1183 * The following example illustrates the +limit+ parameter.
1184 *
1185 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
1186 * puts "Waiting" until y.join(0.15)
1187 *
1188 * This will produce:
1189 *
1190 * tick...
1191 * Waiting
1192 * tick...
1193 * Waiting
1194 * tick...
1195 * tick...
1196 */
1197
1198static VALUE
1199thread_join_m(int argc, VALUE *argv, VALUE self)
1200{
1201 VALUE timeout = Qnil;
1202 rb_hrtime_t rel = 0, *limit = 0;
1203
1204 if (rb_check_arity(argc, 0, 1)) {
1205 timeout = argv[0];
1206 }
1207
1208 // Convert the timeout eagerly, so it's always converted and deterministic
1209 /*
1210 * This supports INFINITY and negative values, so we can't use
1211 * rb_time_interval right now...
1212 */
1213 if (NIL_P(timeout)) {
1214 /* unlimited */
1215 }
1216 else if (FIXNUM_P(timeout)) {
1217 rel = rb_sec2hrtime(NUM2TIMET(timeout));
1218 limit = &rel;
1219 }
1220 else {
1221 limit = double2hrtime(&rel, rb_num2dbl(timeout));
1222 }
1223
1224 return thread_join(rb_thread_ptr(self), timeout, limit);
1225}
1226
1227/*
1228 * call-seq:
1229 * thr.value -> obj
1230 *
1231 * Waits for +thr+ to complete, using #join, and returns its value or raises
1232 * the exception which terminated the thread.
1233 *
1234 * a = Thread.new { 2 + 2 }
1235 * a.value #=> 4
1236 *
1237 * b = Thread.new { raise 'something went wrong' }
1238 * b.value #=> RuntimeError: something went wrong
1239 */
1240
1241static VALUE
1242thread_value(VALUE self)
1243{
1244 rb_thread_t *th = rb_thread_ptr(self);
1245 thread_join(th, Qnil, 0);
1246 if (UNDEF_P(th->value)) {
1247 // If the thread is dead because we forked, th->value is still Qundef.
1248 return Qnil;
1249 }
1250 return th->value;
1251}
1252
1253/*
1254 * Thread Scheduling
1255 */
1256
1257static void
1258getclockofday(struct timespec *ts)
1259{
1260#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1261 if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
1262 return;
1263#endif
1264 rb_timespec_now(ts);
1265}
1266
1267/*
1268 * Don't inline this, since the library call is already time consuming
1269 * and we don't want "struct timespec" on the stack too long for GC.
1270 */
1271NOINLINE(rb_hrtime_t rb_hrtime_now(void));
1272rb_hrtime_t
1273rb_hrtime_now(void)
1274{
1275 struct timespec ts;
1276
1277 getclockofday(&ts);
1278 return rb_timespec2hrtime(&ts);
1279}
1280
1281static void
1282sleep_forever(rb_thread_t *th, unsigned int fl)
1283{
1284 enum rb_thread_status prev_status = th->status;
1285 enum rb_thread_status status;
1286 int woke;
1287
1288 status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
1289 th->status = status;
1290 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1291 while (th->status == status) {
1292 if (fl & SLEEP_DEADLOCKABLE) {
1293 rb_ractor_sleeper_threads_inc(th->ractor);
1294 rb_check_deadlock(th->ractor);
1295 }
1296 native_sleep(th, 0);
1297 if (fl & SLEEP_DEADLOCKABLE) {
1298 rb_ractor_sleeper_threads_dec(th->ractor);
1299 }
1300 woke = vm_check_ints_blocking(th->ec);
1301 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1302 break;
1303 }
1304 th->status = prev_status;
1305}
1306
1307/*
1308 * at least gcc 7.2 and 7.3 complain about "rb_hrtime_t end"
1309 * being uninitialized; maybe other versions, too.
1310 */
1311COMPILER_WARNING_PUSH
1312#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
1313COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
1314#endif
1315#ifndef PRIu64
1316#define PRIu64 PRI_64_PREFIX "u"
1317#endif
1318/*
1319 * @end is the absolute time when @timeout is set to expire.
1320 * Returns true if @end has passed;
1321 * updates @timeout and returns false otherwise.
1322 */
1323static int
1324hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
1325{
1326 rb_hrtime_t now = rb_hrtime_now();
1327
1328 if (now > end) return 1;
1329
1330 RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);
1331
1332 *timeout = end - now;
1333 return 0;
1334}
1335COMPILER_WARNING_POP
1336
1337static int
1338sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
1339{
1340 enum rb_thread_status prev_status = th->status;
1341 int woke;
1342 rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
1343
1344 th->status = THREAD_STOPPED;
1345 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1346 while (th->status == THREAD_STOPPED) {
1347 native_sleep(th, &rel);
1348 woke = vm_check_ints_blocking(th->ec);
1349 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1350 break;
1351 if (hrtime_update_expire(&rel, end))
1352 break;
1353 woke = 1;
1354 }
1355 th->status = prev_status;
1356 return woke;
1357}
1358
1359static int
1360sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
1361{
1362 enum rb_thread_status prev_status = th->status;
1363 int woke;
1364 rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());
1365
1366 th->status = THREAD_STOPPED;
1367 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1368 while (th->status == THREAD_STOPPED) {
1369 native_sleep(th, &rel);
1370 woke = vm_check_ints_blocking(th->ec);
1371 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1372 break;
1373 if (hrtime_update_expire(&rel, end))
1374 break;
1375 woke = 1;
1376 }
1377 th->status = prev_status;
1378 return woke;
1379}
1380
1381void
1382rb_thread_sleep_forever(void)
1383{
1384 RUBY_DEBUG_LOG("");
1385 sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
1386}
1387
1388void
1389rb_thread_sleep_deadly(void)
1390{
1391 RUBY_DEBUG_LOG("");
1392 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
1393}
1394
1395void
1396rb_thread_sleep_interruptible(void)
1397{
1398 rb_thread_t *th = GET_THREAD();
1399 enum rb_thread_status prev_status = th->status;
1400
1401 th->status = THREAD_STOPPED;
1402 native_sleep(th, 0);
1403 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1404 th->status = prev_status;
1405}
1406
1407static void
1408rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
1409{
1410 VALUE scheduler = rb_fiber_scheduler_current();
1411 if (scheduler != Qnil) {
1412 rb_fiber_scheduler_block(scheduler, blocker, timeout);
1413 }
1414 else {
1415 RUBY_DEBUG_LOG("");
1416 if (end) {
1417 sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
1418 }
1419 else {
1420 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
1421 }
1422 }
1423}
1424
1425void
1426rb_thread_wait_for(struct timeval time)
1427{
1428 rb_thread_t *th = GET_THREAD();
1429
1430 sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
1431}
1432
1433/*
1434 * CAUTION: This function causes thread switching.
1435 * rb_thread_check_ints() checks Ruby's interrupts;
1436 * some interrupts require thread switching, invoking handlers,
1437 * and so on.
1438 */
1439
1440void
1441rb_thread_check_ints(void)
1442{
1443 RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
1444}
1445
1446/*
1447 * Hidden API for tcl/tk wrapper.
1448 * There is no guarantee that it will be kept.
1449 */
1450int
1451rb_thread_check_trap_pending(void)
1452{
1453 return rb_signal_buff_size() != 0;
1454}
1455
1456/* This function can be called in blocking region. */
1457int
1458rb_thread_interrupted(VALUE thval)
1459{
1460 return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
1461}
1462
1463void
1464rb_thread_sleep(int sec)
1465{
1466 rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
1467}
1468
1469static void
1470rb_thread_schedule_limits(uint32_t limits_us)
1471{
1472 if (!rb_thread_alone()) {
1473 rb_thread_t *th = GET_THREAD();
1474 RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);
1475
1476 if (th->running_time_us >= limits_us) {
1477 RUBY_DEBUG_LOG("switch %s", "start");
1478
1479 RB_GC_SAVE_MACHINE_CONTEXT(th);
1480 thread_sched_yield(TH_SCHED(th), th);
1481 rb_ractor_thread_switch(th->ractor, th);
1482
1483 RUBY_DEBUG_LOG("switch %s", "done");
1484 }
1485 }
1486}
1487
1488void
1489rb_thread_schedule(void)
1490{
1491 rb_thread_schedule_limits(0);
1492 RUBY_VM_CHECK_INTS(GET_EC());
1493}
1494
1495/* blocking region */
1496
1497static inline int
1498blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
1499 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1500{
1501#ifdef RUBY_VM_CRITICAL_SECTION
1502 VM_ASSERT(ruby_assert_critical_section_entered == 0);
1503#endif
1504 VM_ASSERT(th == GET_THREAD());
1505
1506 region->prev_status = th->status;
1507 if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
1508 th->blocking_region_buffer = region;
1509 th->status = THREAD_STOPPED;
1510 rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
1511
1512 RUBY_DEBUG_LOG("");
1513 return TRUE;
1514 }
1515 else {
1516 return FALSE;
1517 }
1518}
1519
1520static inline void
1521blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1522{
1523 /* entry to ubf_list still permitted at this point, make it impossible: */
1524 unblock_function_clear(th);
1525 /* entry to ubf_list impossible at this point, so unregister is safe: */
1526 unregister_ubf_list(th);
1527
1528 thread_sched_to_running(TH_SCHED(th), th);
1529 rb_ractor_thread_switch(th->ractor, th);
1530
1531 th->blocking_region_buffer = 0;
1532 rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
1533 if (th->status == THREAD_STOPPED) {
1534 th->status = region->prev_status;
1535 }
1536
1537 RUBY_DEBUG_LOG("");
1538 VM_ASSERT(th == GET_THREAD());
1539}
1540
1541void *
1542rb_nogvl(void *(*func)(void *), void *data1,
1543 rb_unblock_function_t *ubf, void *data2,
1544 int flags)
1545{
1546 void *val = 0;
1547 rb_execution_context_t *ec = GET_EC();
1548 rb_thread_t *th = rb_ec_thread_ptr(ec);
1549 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1550 bool is_main_thread = vm->ractor.main_thread == th;
1551 int saved_errno = 0;
1552 VALUE ubf_th = Qfalse;
1553
1554 if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
1555 ubf = ubf_select;
1556 data2 = th;
1557 }
1558 else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
1559 if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
1560 vm->ubf_async_safe = 1;
1561 }
1562 else {
1563 ubf_th = rb_thread_start_unblock_thread();
1564 }
1565 }
1566
1567 BLOCKING_REGION(th, {
1568 val = func(data1);
1569 saved_errno = errno;
1570 }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
1571
1572 if (is_main_thread) vm->ubf_async_safe = 0;
1573
1574 if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
1575 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1576 }
1577
1578 if (ubf_th != Qfalse) {
1579 thread_value(rb_thread_kill(ubf_th));
1580 }
1581
1582 errno = saved_errno;
1583
1584 return val;
1585}
1586
1587/*
1588 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1589 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1590 * without interrupt processing.
1591 *
1592 * rb_thread_call_without_gvl() does:
1593 * (1) Check interrupts.
1594 * (2) release GVL.
1595 * Other Ruby threads may run in parallel.
1596 * (3) call func with data1
1597 * (4) acquire GVL.
1598 * Other Ruby threads can not run in parallel any more.
1599 * (5) Check interrupts.
1600 *
1601 * rb_thread_call_without_gvl2() does:
1602 * (1) Check interrupt and return if interrupted.
1603 * (2) release GVL.
1604 * (3) call func with data1 and a pointer to the flags.
1605 * (4) acquire GVL.
1606 *
1607 * If another thread interrupts this thread (Thread#kill, signal delivery,
1608 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1609 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1610 * toggling a cancellation flag, canceling the invocation of a call inside
1611 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1612 *
1613 * There are built-in ubfs and you can specify these ubfs:
1614 *
1615 * * RUBY_UBF_IO: ubf for IO operation
1616 * * RUBY_UBF_PROCESS: ubf for process operation
1617 *
1618 * However, we can not guarantee our built-in ubfs interrupt your `func()'
1619 * correctly. Be careful when using rb_thread_call_without_gvl(). If you don't
1620 * provide proper ubf(), your program will not stop for Control+C or other
1621 * shutdown events.
1622 *
1623 * "Check interrupts" on above list means checking asynchronous
1624 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1625 * request, and so on) and calling corresponding procedures
1626 * (such as `trap' for signals, raise an exception for Thread#raise).
1627 * If `func()' finished and received interrupts, you may skip interrupt
1628 * checking. For example, assume the following func(), which reads data from a file.
1629 *
1630 * read_func(...) {
1631 * // (a) before read
1632 * read(buffer); // (b) reading
1633 * // (c) after read
1634 * }
1635 *
1636 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1637 * `read_func()' and interrupts are checked. However, if an interrupt occurs
1638 * at (c), after the *read* operation is completed, checking interrupts is
1639 * harmful because it causes an irrevocable side effect: the read data will vanish. To
1640 * avoid such a problem, `read_func()' should be used with
1641 * `rb_thread_call_without_gvl2()'.
1642 *
1643 * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
1644 * immediately. This function does not indicate when the execution was interrupted.
1645 * For example, there are 4 possible timings: (a), (b), (c), and before calling
1646 * read_func(). You need to record the progress of read_func() and check
1647 * it after `rb_thread_call_without_gvl2()'. You may need to call
1648 * `rb_thread_check_ints()' correctly, or your program will not be able to
1649 * handle events such as `trap' properly.
1650 *
1651 * NOTE: You can not execute most of the Ruby C API nor touch Ruby
1652 * objects in `func()' and `ubf()', including raising an
1653 * exception, because the current thread doesn't hold the GVL
1654 * (it causes synchronization problems). If you need to
1655 * call ruby functions, either use rb_thread_call_with_gvl()
1656 * or read the source code of the C APIs and confirm their safety
1657 * yourself.
1658 *
1659 * NOTE: In short, this API is difficult to use safely. I recommend you
1660 * use other ways if you have them. We lack experience using this API.
1661 * Please report any problems related to it.
1662 *
1663 * NOTE: Releasing and re-acquiring the GVL may be expensive operations
1664 * for a short-running `func()'. Be sure to benchmark, and use this
1665 * mechanism only when `func()' consumes enough time.
1666 *
1667 * Safe C API:
1668 * * rb_thread_interrupted() - check interrupt flag
1669 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1670 * they will work without GVL, and may acquire GVL when GC is needed.
1671 */
1672void *
1673rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1674 rb_unblock_function_t *ubf, void *data2)
1675{
1676 return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
1677}
1678
1679void *
1680rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1681 rb_unblock_function_t *ubf, void *data2)
1682{
1683 return rb_nogvl(func, data1, ubf, data2, 0);
1684}
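/*
 * A hedged example tying the notes above together: a cancellable wait whose
 * ubf toggles a flag (the names sleep_op, ubf_cancel and do_wait are
 * hypothetical, not part of this API):
 *
 *   struct sleep_op { volatile int cancelled; };
 *
 *   static void
 *   ubf_cancel(void *ptr)
 *   {
 *       struct sleep_op *op = ptr;
 *       op->cancelled = 1; // only flips a flag; no Ruby calls, may lack GVL
 *   }
 *
 *   static void *
 *   do_wait(void *ptr)
 *   {
 *       struct sleep_op *op = ptr;
 *       while (!op->cancelled) usleep(1000);
 *       return NULL;
 *   }
 *
 *   struct sleep_op op = { 0 };
 *   rb_thread_call_without_gvl(do_wait, &op, ubf_cancel, &op);
 */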
1685
1686VALUE
1687rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
1688{
1689 volatile VALUE val = Qundef; /* shouldn't be used */
1690 rb_execution_context_t * volatile ec = GET_EC();
1691 volatile int saved_errno = 0;
1692 enum ruby_tag_type state;
1693
1694 struct waiting_fd waiting_fd = {
1695 .fd = fd,
1696 .th = rb_ec_thread_ptr(ec)
1697 };
1698
1699 // `errno` is only valid when there is an actual error - but we can't
1700 // extract that from the return value of `func` alone, so we clear any
1701 // prior `errno` value here so that we can later check if it was set by
1702 // `func` or not (as opposed to some previously set value).
1703 errno = 0;
1704
1705 RB_VM_LOCK_ENTER();
1706 {
1707 ccan_list_add(&rb_ec_vm_ptr(ec)->waiting_fds, &waiting_fd.wfd_node);
1708 }
1709 RB_VM_LOCK_LEAVE();
1710
1711 EC_PUSH_TAG(ec);
1712 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1713 BLOCKING_REGION(waiting_fd.th, {
1714 val = func(data1);
1715 saved_errno = errno;
1716 }, ubf_select, waiting_fd.th, FALSE);
1717 }
1718 EC_POP_TAG();
1719
1720 /*
1721 * must be deleted before jump
1722 * this will delete either from waiting_fds or on-stack CCAN_LIST_HEAD(busy)
1723 */
1724 RB_VM_LOCK_ENTER();
1725 {
1726 ccan_list_del(&waiting_fd.wfd_node);
1727 }
1728 RB_VM_LOCK_LEAVE();
1729
1730 if (state) {
1731 EC_JUMP_TAG(ec, state);
1732 }
1733 /* TODO: check func() */
1734 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1735
1736 // If the error was a timeout, we raise a specific exception for that:
1737 if (saved_errno == ETIMEDOUT) {
1738 rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
1739 }
1740
1741 errno = saved_errno;
1742
1743 return val;
1744}
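/*
 * The waiting_fd registration above is what lets the VM find and interrupt
 * threads blocked on a file descriptor that is about to be closed
 * (see rb_notify_fd_close() elsewhere in this file).
 */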
1745
1746/*
1747 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1748 *
1749 * After releasing the GVL using
1750 * rb_thread_call_without_gvl() you can not access Ruby values or invoke
1751 * methods. If you need to access Ruby, you must use this function,
1752 * rb_thread_call_with_gvl().
1753 *
1754 * This function rb_thread_call_with_gvl() does:
1755 * (1) acquire GVL.
1756 * (2) call passed function `func'.
1757 * (3) release GVL.
1758 * (4) return a value which is returned at (2).
1759 *
1760 * NOTE: You should not return a Ruby object at (2) because such an object
1761 * will not be marked.
1762 *
1763 * NOTE: If an exception is raised in `func', this function DOES NOT
1764 * protect (catch) the exception. If you have any resources
1765 * which should be freed before throwing the exception, you need to use
1766 * rb_protect() in `func' and return a value which indicates that
1767 * an exception was raised.
1768 *
1769 * NOTE: This function should not be called by a thread which was not
1770 * created as Ruby thread (created by Thread.new or so). In other
1771 * words, this function *DOES NOT* associate or convert a NON-Ruby
1772 * thread to a Ruby thread.
1773 */
1774void *
1775rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1776{
1777 rb_thread_t *th = ruby_thread_from_native();
1778 struct rb_blocking_region_buffer *brb;
1779 struct rb_unblock_callback prev_unblock;
1780 void *r;
1781
1782 if (th == 0) {
1783 /* Error has occurred, but we can't use rb_bug()
1784 * because this thread is not Ruby's thread.
1785 * What should we do?
1786 */
1787 bp();
1788 fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1789 exit(EXIT_FAILURE);
1790 }
1791
1792 brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
1793 prev_unblock = th->unblock;
1794
1795 if (brb == 0) {
1796 rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1797 }
1798
1799 blocking_region_end(th, brb);
1800 /* enter to Ruby world: You can access Ruby values, methods and so on. */
1801 r = (*func)(data1);
1802 /* leave from Ruby world: You can not access Ruby values, etc. */
1803 int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
1804 RUBY_ASSERT_ALWAYS(released);
1805 RB_GC_SAVE_MACHINE_CONTEXT(th);
1806 thread_sched_to_waiting(TH_SCHED(th));
1807 return r;
1808}
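/*
 * A minimal sketch of re-entering Ruby from inside a GVL-released region
 * (the callback name log_from_c is hypothetical):
 *
 *   static void *
 *   log_from_c(void *msg)
 *   {
 *       rb_funcall(rb_stderr, rb_intern("puts"), 1, rb_str_new_cstr(msg));
 *       return NULL;
 *   }
 *
 *   // inside a func() running under rb_thread_call_without_gvl():
 *   rb_thread_call_with_gvl(log_from_c, (void *)"progress...");
 */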
1809
1810/*
1811 * ruby_thread_has_gvl_p - check if current native thread has GVL.
1812 *
1813 ***
1814 *** This API is EXPERIMENTAL!
1815 *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1816 ***
1817 */
1818
1819int
1820ruby_thread_has_gvl_p(void)
1821{
1822 rb_thread_t *th = ruby_thread_from_native();
1823
1824 if (th && th->blocking_region_buffer == 0) {
1825 return 1;
1826 }
1827 else {
1828 return 0;
1829 }
1830}
1831
1832/*
1833 * call-seq:
1834 * Thread.pass -> nil
1835 *
1836 * Give the thread scheduler a hint to pass execution to another thread.
1837 * A running thread may or may not switch; it depends on the OS and processor.
1838 */
1839
1840static VALUE
1841thread_s_pass(VALUE klass)
1842{
1843 rb_thread_schedule();
1844 return Qnil;
1845}
1846
1847/*****************************************************/
1848
1849/*
1850 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1851 *
1852 * Async events such as an exception thrown by Thread#raise,
1853 * Thread#kill and thread termination (after main thread termination)
1854 * will be queued to th->pending_interrupt_queue.
1855 * - clear: clear the queue.
1856 * - enque: enqueue err object into queue.
1857 * - deque: dequeue err object from queue.
1858 * - active_p: return 1 if the queue should be checked.
1859 *
1860 * All rb_threadptr_pending_interrupt_* functions are called by
1861 * a thread holding the GVL, of course.
1862 * Note that all "rb_"-prefixed APIs need the GVL to be called.
1863 */
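/*
 * For example, Thread#raise on a live target ends up (in rb_threadptr_raise,
 * later in this file) enqueueing the exception object with
 * rb_threadptr_pending_interrupt_enque() and then poking the target with
 * rb_threadptr_interrupt(); the target dequeues and raises it at its next
 * interrupt check.
 */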
1864
1865void
1866rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
1867{
1868 rb_ary_clear(th->pending_interrupt_queue);
1869}
1870
1871void
1872rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
1873{
1874 rb_ary_push(th->pending_interrupt_queue, v);
1875 th->pending_interrupt_queue_checked = 0;
1876}
1877
1878static void
1879threadptr_check_pending_interrupt_queue(rb_thread_t *th)
1880{
1881 if (!th->pending_interrupt_queue) {
1882 rb_raise(rb_eThreadError, "uninitialized thread");
1883 }
1884}
1885
1886enum handle_interrupt_timing {
1887 INTERRUPT_NONE,
1888 INTERRUPT_IMMEDIATE,
1889 INTERRUPT_ON_BLOCKING,
1890 INTERRUPT_NEVER
1891};
1892
1893static enum handle_interrupt_timing
1894rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
1895{
1896 VALUE mask;
1897 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
1898 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
1899 VALUE mod;
1900 long i;
1901
1902 for (i=0; i<mask_stack_len; i++) {
1903 mask = mask_stack[mask_stack_len-(i+1)];
1904
1905 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
1906 VALUE klass = mod;
1907 VALUE sym;
1908
1909 if (BUILTIN_TYPE(mod) == T_ICLASS) {
1910 klass = RBASIC(mod)->klass;
1911 }
1912 else if (mod != RCLASS_ORIGIN(mod)) {
1913 continue;
1914 }
1915
1916 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
1917 if (sym == sym_immediate) {
1918 return INTERRUPT_IMMEDIATE;
1919 }
1920 else if (sym == sym_on_blocking) {
1921 return INTERRUPT_ON_BLOCKING;
1922 }
1923 else if (sym == sym_never) {
1924 return INTERRUPT_NEVER;
1925 }
1926 else {
1927 rb_raise(rb_eThreadError, "unknown mask signature");
1928 }
1929 }
1930 }
1931 /* try the next mask */
1932 }
1933 return INTERRUPT_NONE;
1934}
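/*
 * For example, under Thread.handle_interrupt(RuntimeError => :never) { ... }
 * the mask stack maps RuntimeError to :never, so an enqueued RuntimeError
 * (or subclass) yields INTERRUPT_NEVER here, and
 * rb_threadptr_pending_interrupt_deque() below leaves it in the queue until
 * the block exits.
 */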
1935
1936static int
1937rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
1938{
1939 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
1940}
1941
1942static int
1943rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
1944{
1945 int i;
1946 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1947 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
1948 if (rb_obj_is_kind_of(e, err)) {
1949 return TRUE;
1950 }
1951 }
1952 return FALSE;
1953}
1954
1955static VALUE
1956rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
1957{
1958#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
1959 int i;
1960
1961 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1962 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
1963
1964 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
1965
1966 switch (mask_timing) {
1967 case INTERRUPT_ON_BLOCKING:
1968 if (timing != INTERRUPT_ON_BLOCKING) {
1969 break;
1970 }
1971 /* fall through */
1972 case INTERRUPT_NONE: /* default: IMMEDIATE */
1973 case INTERRUPT_IMMEDIATE:
1974 rb_ary_delete_at(th->pending_interrupt_queue, i);
1975 return err;
1976 case INTERRUPT_NEVER:
1977 break;
1978 }
1979 }
1980
1981 th->pending_interrupt_queue_checked = 1;
1982 return Qundef;
1983#else
1984 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
1985 if (rb_threadptr_pending_interrupt_empty_p(th)) {
1986 th->pending_interrupt_queue_checked = 1;
1987 }
1988 return err;
1989#endif
1990}
1991
1992static int
1993threadptr_pending_interrupt_active_p(rb_thread_t *th)
1994{
1995 /*
1996 * For optimization, we don't check async errinfo queue
1997 * if the queue and the thread interrupt mask were not changed
1998 * since last check.
1999 */
2000 if (th->pending_interrupt_queue_checked) {
2001 return 0;
2002 }
2003
2004 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2005 return 0;
2006 }
2007
2008 return 1;
2009}
2010
2011static int
2012handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2013{
2014 VALUE *maskp = (VALUE *)args;
2015
2016 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2017 rb_raise(rb_eArgError, "unknown mask signature");
2018 }
2019
2020 if (!*maskp) {
2021 *maskp = rb_ident_hash_new();
2022 }
2023 rb_hash_aset(*maskp, key, val);
2024
2025 return ST_CONTINUE;
2026}
2027
2028/*
2029 * call-seq:
2030 * Thread.handle_interrupt(hash) { ... } -> result of the block
2031 *
2032 * Changes asynchronous interrupt timing.
2033 *
2034 * _interrupt_ means an asynchronous event and its corresponding procedure,
2035 * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
2036 * and main thread termination (if the main thread terminates, then all
2037 * other threads will be killed).
2038 *
2039 * The given +hash+ has pairs like <code>ExceptionClass =>
2040 * :TimingSymbol</code>, where ExceptionClass is the interrupt handled by
2041 * the given block and TimingSymbol is one of the following symbols:
2042 *
2043 * [+:immediate+] Invoke interrupts immediately.
2044 * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
2045 * [+:never+]       Never invoke interrupts.
2046 *
2047 * _BlockingOperation_ means an operation that will block the calling thread,
2048 * such as read and write.  On the CRuby implementation, _BlockingOperation_ is any
2049 * operation executed without the GVL.
2050 *
2051 * Masked asynchronous interrupts are delayed until they are enabled.
2052 * This method is similar to sigprocmask(3).
2053 *
2054 * === NOTE
2055 *
2056 * Asynchronous interrupts are difficult to use.
2057 *
2058 * If you need to communicate between threads, please consider another way, such as Queue.
2059 *
2060 * Otherwise, use asynchronous interrupts only with a deep understanding of this method.
2061 *
2062 * === Usage
2063 *
2064 * In this example, we can guard against Thread#raise exceptions.
2065 *
2066 * Using the +:never+ TimingSymbol, the RuntimeError exception will always be
2067 * deferred in the outer block of the new thread. In the inner
2068 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2069 *
2070 * th = Thread.new do
2071 * Thread.handle_interrupt(RuntimeError => :never) {
2072 * begin
2073 * # You can write resource allocation code safely.
2074 * Thread.handle_interrupt(RuntimeError => :immediate) {
2075 * # ...
2076 * }
2077 * ensure
2078 * # You can write resource deallocation code safely.
2079 * end
2080 * }
2081 * end
2082 * Thread.pass
2083 * # ...
2084 * th.raise "stop"
2085 *
2086 * While we are deferring the RuntimeError exception, it's safe to write our
2087 * resource allocation code. Then, the ensure block is where we can safely
2088 * deallocate our resources.
2089 *
2090 * ==== Guarding from Timeout::Error
2091 *
2092 * In the next example, we will guard against the Timeout::Error exception. This
2093 * helps prevent resource leaks when Timeout::Error exceptions occur
2094 * during a normal ensure clause. For this example we use the help of the
2095 * standard library Timeout, from lib/timeout.rb.
2096 *
2097 * require 'timeout'
2098 * Thread.handle_interrupt(Timeout::Error => :never) {
2099 *     Timeout.timeout(10){
2100 * # Timeout::Error doesn't occur here
2101 * Thread.handle_interrupt(Timeout::Error => :on_blocking) {
2102 * # possible to be killed by Timeout::Error
2103 * # while blocking operation
2104 * }
2105 * # Timeout::Error doesn't occur here
2106 * }
2107 * }
2108 *
2109 * In the first part of the +timeout+ block, we can rely on Timeout::Error being
2110 * ignored. Then in the <code>Timeout::Error => :on_blocking</code> block, any
2111 * operation that will block the calling thread is susceptible to a
2112 * Timeout::Error exception being raised.
2113 *
2114 * ==== Stack control settings
2115 *
2116 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2117 * to control more than one ExceptionClass and TimingSymbol at a time.
2118 *
2119 * Thread.handle_interrupt(FooError => :never) {
2120 * Thread.handle_interrupt(BarError => :never) {
2121 * # FooError and BarError are prohibited.
2122 * }
2123 * }
2124 *
2125 * ==== Inheritance with ExceptionClass
2126 *
2127 * All exceptions inherited from the ExceptionClass parameter will be considered.
2128 *
2129 * Thread.handle_interrupt(Exception => :never) {
2130 * # all exceptions inherited from Exception are prohibited.
2131 * }
2132 *
2133 * For handling all interrupts, use +Object+ and not +Exception+
2134 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
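 *
 * For example, a minimal sketch of masking every interrupt, including
 * kill/terminate requests (illustrative only):
 *
 *    Thread.handle_interrupt(Object => :never) {
 *      # even Thread#kill and Thread.exit requests are deferred here
 *    }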
2135 */
2136static VALUE
2137rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2138{
2139 VALUE mask;
2140 rb_execution_context_t * volatile ec = GET_EC();
2141 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2142 volatile VALUE r = Qnil;
2143 enum ruby_tag_type state;
2144
2145 if (!rb_block_given_p()) {
2146 rb_raise(rb_eArgError, "block is needed.");
2147 }
2148
2149 mask = 0;
2150 mask_arg = rb_to_hash_type(mask_arg);
2151 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2152 if (!mask) {
2153 return rb_yield(Qnil);
2154 }
2155 OBJ_FREEZE_RAW(mask);
2156 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2157 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2158 th->pending_interrupt_queue_checked = 0;
2159 RUBY_VM_SET_INTERRUPT(th->ec);
2160 }
2161
2162 EC_PUSH_TAG(th->ec);
2163 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2164 r = rb_yield(Qnil);
2165 }
2166 EC_POP_TAG();
2167
2168 rb_ary_pop(th->pending_interrupt_mask_stack);
2169 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2170 th->pending_interrupt_queue_checked = 0;
2171 RUBY_VM_SET_INTERRUPT(th->ec);
2172 }
2173
2174 RUBY_VM_CHECK_INTS(th->ec);
2175
2176 if (state) {
2177 EC_JUMP_TAG(th->ec, state);
2178 }
2179
2180 return r;
2181}
2182
2183/*
2184 * call-seq:
2185 * target_thread.pending_interrupt?(error = nil) -> true/false
2186 *
2187 * Returns +true+ if the asynchronous event queue is non-empty for the target thread.
2188 *
2189 * If +error+ is given, then check only for +error+ type deferred events.
2190 *
2191 * See ::pending_interrupt? for more information.
2192 */
2193static VALUE
2194rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2195{
2196 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2197
2198 if (!target_th->pending_interrupt_queue) {
2199 return Qfalse;
2200 }
2201 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2202 return Qfalse;
2203 }
2204 if (rb_check_arity(argc, 0, 1)) {
2205 VALUE err = argv[0];
2206 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2207 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2208 }
2209 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2210 }
2211 else {
2212 return Qtrue;
2213 }
2214}
2215
2216/*
2217 * call-seq:
2218 * Thread.pending_interrupt?(error = nil) -> true/false
2219 *
2220 * Returns +true+ if the asynchronous event queue of the current thread is non-empty.
2221 *
2222 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2223 * this method can be used to determine if there are any deferred events.
2224 *
2225 * If this method returns +true+, then you may finish the +:never+ block.
2226 *
2227 * For example, the following method processes deferred asynchronous events
2228 * immediately.
2229 *
2230 * def Thread.kick_interrupt_immediately
2231 * Thread.handle_interrupt(Object => :immediate) {
2232 * Thread.pass
2233 * }
2234 * end
2235 *
2236 * If +error+ is given, then check only for +error+ type deferred events.
2237 *
2238 * === Usage
2239 *
2240 * th = Thread.new{
2241 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2242 * while true
2243 * ...
2244 * # reach safe point to invoke interrupt
2245 * if Thread.pending_interrupt?
2246 * Thread.handle_interrupt(Object => :immediate){}
2247 * end
2248 * ...
2249 * end
2250 * }
2251 * }
2252 * ...
2253 * th.raise # stop thread
2254 *
2255 * This example can also be written as follows, which you should prefer in
2256 * order to avoid asynchronous interrupts entirely.
2257 *
2258 * flag = true
2259 * th = Thread.new{
2260 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2261 * while true
2262 * ...
2263 * # reach safe point to invoke interrupt
2264 * break if flag == false
2265 * ...
2266 * end
2267 * }
2268 * }
2269 * ...
2270 * flag = false # stop thread
2271 */
2272
2273static VALUE
2274rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2275{
2276 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2277}
2278
2279NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2280
2281static void
2282rb_threadptr_to_kill(rb_thread_t *th)
2283{
2284 rb_threadptr_pending_interrupt_clear(th);
2285 th->status = THREAD_RUNNABLE;
2286 th->to_kill = 1;
2287 th->ec->errinfo = INT2FIX(TAG_FATAL);
2288 EC_JUMP_TAG(th->ec, TAG_FATAL);
2289}
2290
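/*
 * Atomically fetch and clear the unmasked bits of ec->interrupt_flag.
 * The compare-and-swap loop retries when another thread sets a new flag
 * bit between the read and the swap; masked bits are left in place so
 * they can be delivered after the mask is lifted.
 */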
2291static inline rb_atomic_t
2292threadptr_get_interrupts(rb_thread_t *th)
2293{
2294 rb_execution_context_t *ec = th->ec;
2295 rb_atomic_t interrupt;
2296 rb_atomic_t old;
2297
2298 do {
2299 interrupt = ec->interrupt_flag;
2300 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2301 } while (old != interrupt);
2302 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2303}
2304
2305#if USE_MJIT
2306// process.c
2307extern bool mjit_waitpid_finished;
2308extern int mjit_waitpid_status;
2309#endif
2310
2311MJIT_FUNC_EXPORTED int
2312rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2313{
2314 rb_atomic_t interrupt;
2315 int postponed_job_interrupt = 0;
2316 int ret = FALSE;
2317
2318 if (th->ec->raised_flag) return ret;
2319
2320 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2321 int sig;
2322 int timer_interrupt;
2323 int pending_interrupt;
2324 int trap_interrupt;
2325 int terminate_interrupt;
2326
2327 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2328 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2329 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2330 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2331 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2332
2333 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2334 RB_VM_LOCK_ENTER();
2335 RB_VM_LOCK_LEAVE();
2336 }
2337
2338 if (postponed_job_interrupt) {
2339 rb_postponed_job_flush(th->vm);
2340 }
2341
2342 /* signal handling */
2343 if (trap_interrupt && (th == th->vm->ractor.main_thread)) {
2344 enum rb_thread_status prev_status = th->status;
2345 int sigwait_fd = rb_sigwait_fd_get(th);
2346
2347 if (sigwait_fd >= 0) {
2348 (void)consume_communication_pipe(sigwait_fd);
2349 ruby_sigchld_handler(th->vm);
2350 rb_sigwait_fd_put(th, sigwait_fd);
2351 rb_sigwait_fd_migrate(th->vm);
2352 }
2353 th->status = THREAD_RUNNABLE;
2354 while ((sig = rb_get_next_signal()) != 0) {
2355 ret |= rb_signal_exec(th, sig);
2356 }
2357 th->status = prev_status;
2358 }
2359
2360#if USE_MJIT
2361 // Handle waitpid_signal for MJIT issued by ruby_sigchld_handler. This needs to be done
2362 // outside ruby_sigchld_handler to avoid recursively relying on the SIGCHLD handler.
2363 if (mjit_waitpid_finished && th == th->vm->ractor.main_thread) {
2364 mjit_waitpid_finished = false;
2365 mjit_notify_waitpid(WIFEXITED(mjit_waitpid_status) ? WEXITSTATUS(mjit_waitpid_status) : -1);
2366 }
2367#endif
2368
2369 /* exception from another thread */
2370 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2371 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2372 RUBY_DEBUG_LOG("err:%"PRIdVALUE"\n", err);
2373 ret = TRUE;
2374
2375 if (UNDEF_P(err)) {
2376 /* no error */
2377 }
2378 else if (err == eKillSignal /* Thread#kill received */ ||
2379 err == eTerminateSignal /* Terminate thread */ ||
2380 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2381 terminate_interrupt = 1;
2382 }
2383 else {
2384 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2385                    /* the only special exception to be queued across threads */
2386 err = ruby_vm_special_exception_copy(err);
2387 }
2388                /* set runnable if th was sleeping. */
2389 if (th->status == THREAD_STOPPED ||
2390 th->status == THREAD_STOPPED_FOREVER)
2391 th->status = THREAD_RUNNABLE;
2392 rb_exc_raise(err);
2393 }
2394 }
2395
2396 if (terminate_interrupt) {
2397 rb_threadptr_to_kill(th);
2398 }
2399
2400 if (timer_interrupt) {
2401 uint32_t limits_us = TIME_QUANTUM_USEC;
2402
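            /* Scale the time slice by priority: each positive priority
             * level doubles the quantum and each negative level halves it. */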
2403 if (th->priority > 0)
2404 limits_us <<= th->priority;
2405 else
2406 limits_us >>= -th->priority;
2407
2408 if (th->status == THREAD_RUNNABLE)
2409 th->running_time_us += TIME_QUANTUM_USEC;
2410
2411 VM_ASSERT(th->ec->cfp);
2412 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2413 0, 0, 0, Qundef);
2414
2415 rb_thread_schedule_limits(limits_us);
2416 }
2417 }
2418 return ret;
2419}
2420
2421void
2422rb_thread_execute_interrupts(VALUE thval)
2423{
2424 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2425}
2426
2427static void
2428rb_threadptr_ready(rb_thread_t *th)
2429{
2430 rb_threadptr_interrupt(th);
2431}
2432
2433static VALUE
2434rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2435{
2436 VALUE exc;
2437
2438 if (rb_threadptr_dead(target_th)) {
2439 return Qnil;
2440 }
2441
2442 if (argc == 0) {
2443 exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2444 }
2445 else {
2446 exc = rb_make_exception(argc, argv);
2447 }
2448
2449    /* making an exception object can switch threads,
2450       so we need to check thread deadness again */
2451 if (rb_threadptr_dead(target_th)) {
2452 return Qnil;
2453 }
2454
2455 rb_ec_setup_exception(GET_EC(), exc, Qundef);
2456 rb_threadptr_pending_interrupt_enque(target_th, exc);
2457 rb_threadptr_interrupt(target_th);
2458 return Qnil;
2459}
2460
2461void
2462rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2463{
2464 VALUE argv[2];
2465
2466 argv[0] = rb_eSignal;
2467 argv[1] = INT2FIX(sig);
2468 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2469}
2470
2471void
2472rb_threadptr_signal_exit(rb_thread_t *th)
2473{
2474 VALUE argv[2];
2475
2476 argv[0] = rb_eSystemExit;
2477 argv[1] = rb_str_new2("exit");
2478
2479    // TODO: check signal raise delivery
2480 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2481}
2482
2483int
2484rb_ec_set_raised(rb_execution_context_t *ec)
2485{
2486 if (ec->raised_flag & RAISED_EXCEPTION) {
2487 return 1;
2488 }
2489 ec->raised_flag |= RAISED_EXCEPTION;
2490 return 0;
2491}
2492
2493int
2494rb_ec_reset_raised(rb_execution_context_t *ec)
2495{
2496 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2497 return 0;
2498 }
2499 ec->raised_flag &= ~RAISED_EXCEPTION;
2500 return 1;
2501}
2502
2503int
2504rb_notify_fd_close(int fd, struct ccan_list_head *busy)
2505{
2506 rb_vm_t *vm = GET_THREAD()->vm;
2507 struct waiting_fd *wfd = 0, *next;
2508
2509 RB_VM_LOCK_ENTER();
2510 {
2511 ccan_list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2512 if (wfd->fd == fd) {
2513 rb_thread_t *th = wfd->th;
2514 VALUE err;
2515
2516 ccan_list_del(&wfd->wfd_node);
2517 ccan_list_add(busy, &wfd->wfd_node);
2518
2519 err = th->vm->special_exceptions[ruby_error_stream_closed];
2520 rb_threadptr_pending_interrupt_enque(th, err);
2521 rb_threadptr_interrupt(th);
2522 }
2523 }
2524 }
2525 RB_VM_LOCK_LEAVE();
2526
2527 return !ccan_list_empty(busy);
2528}
2529
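/*
 * Interrupt every thread blocked on fd (collected on the busy list by
 * rb_notify_fd_close() above) and yield the CPU until each waiter has
 * woken up and removed itself from that list.
 */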
2530void
2531rb_thread_fd_close(int fd)
2532{
2533 struct ccan_list_head busy;
2534
2535 ccan_list_head_init(&busy);
2536 if (rb_notify_fd_close(fd, &busy)) {
2537 do rb_thread_schedule(); while (!ccan_list_empty(&busy));
2538 }
2539}
2540
2541/*
2542 * call-seq:
2543 * thr.raise
2544 * thr.raise(string)
2545 * thr.raise(exception [, string [, array]])
2546 *
2547 * Raises an exception from the given thread. The caller does not have to be
2548 * +thr+. See Kernel#raise for more information.
2549 *
2550 * Thread.abort_on_exception = true
2551 * a = Thread.new { sleep(200) }
2552 * a.raise("Gotcha")
2553 *
2554 * This will produce:
2555 *
2556 * prog.rb:3: Gotcha (RuntimeError)
2557 * from prog.rb:2:in `initialize'
2558 * from prog.rb:2:in `new'
2559 * from prog.rb:2
2560 */
2561
2562static VALUE
2563thread_raise_m(int argc, VALUE *argv, VALUE self)
2564{
2565 rb_thread_t *target_th = rb_thread_ptr(self);
2566 const rb_thread_t *current_th = GET_THREAD();
2567
2568 threadptr_check_pending_interrupt_queue(target_th);
2569 rb_threadptr_raise(target_th, argc, argv);
2570
2571 /* To perform Thread.current.raise as Kernel.raise */
2572 if (current_th == target_th) {
2573 RUBY_VM_CHECK_INTS(target_th->ec);
2574 }
2575 return Qnil;
2576}
2577
2578
2579/*
2580 * call-seq:
2581 * thr.exit -> thr
2582 * thr.kill -> thr
2583 * thr.terminate -> thr
2584 *
2585 * Terminates +thr+ and schedules another thread to be run, returning
2586 * the terminated Thread. If this is the main thread, or the last
2587 * thread, exits the process.
2588 */
2589
2590VALUE
2591rb_thread_kill(VALUE thread)
2592{
2593 rb_thread_t *target_th = rb_thread_ptr(thread);
2594
2595 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2596 return thread;
2597 }
2598 if (target_th == target_th->vm->ractor.main_thread) {
2599 rb_exit(EXIT_SUCCESS);
2600 }
2601
2602 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2603
2604 if (target_th == GET_THREAD()) {
2605 /* kill myself immediately */
2606 rb_threadptr_to_kill(target_th);
2607 }
2608 else {
2609 threadptr_check_pending_interrupt_queue(target_th);
2610 rb_threadptr_pending_interrupt_enque(target_th, eKillSignal);
2611 rb_threadptr_interrupt(target_th);
2612 }
2613
2614 return thread;
2615}
2616
2617int
2618rb_thread_to_be_killed(VALUE thread)
2619{
2620 rb_thread_t *target_th = rb_thread_ptr(thread);
2621
2622 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2623 return TRUE;
2624 }
2625 return FALSE;
2626}
2627
2628/*
2629 * call-seq:
2630 * Thread.kill(thread) -> thread
2631 *
2632 * Causes the given +thread+ to exit; see also Thread::exit.
2633 *
2634 * count = 0
2635 * a = Thread.new { loop { count += 1 } }
2636 * sleep(0.1) #=> 0
2637 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2638 * count #=> 93947
2639 * a.alive? #=> false
2640 */
2641
2642static VALUE
2643rb_thread_s_kill(VALUE obj, VALUE th)
2644{
2645 return rb_thread_kill(th);
2646}
2647
2648
2649/*
2650 * call-seq:
2651 * Thread.exit -> thread
2652 *
2653 * Terminates the currently running thread and schedules another thread to be
2654 * run.
2655 *
2656 * If this thread is already marked to be killed, ::exit returns the Thread.
2657 *
2658 * If this is the main thread, or the last thread, exit the process.
2659 */
2660
2661static VALUE
2662rb_thread_exit(VALUE _)
2663{
2664 rb_thread_t *th = GET_THREAD();
2665 return rb_thread_kill(th->self);
2666}
2667
2668
2669/*
2670 * call-seq:
2671 * thr.wakeup -> thr
2672 *
2673 * Marks a given thread as eligible for scheduling; however, it may still
2674 * remain blocked on I/O.
2675 *
2676 * *Note:* This does not invoke the scheduler; see #run for more information.
2677 *
2678 * c = Thread.new { Thread.stop; puts "hey!" }
2679 * sleep 0.1 while c.status!='sleep'
2680 * c.wakeup
2681 * c.join
2682 * #=> "hey!"
2683 */
2684
2685VALUE
2686rb_thread_wakeup(VALUE thread)
2687{
2688 if (!RTEST(rb_thread_wakeup_alive(thread))) {
2689 rb_raise(rb_eThreadError, "killed thread");
2690 }
2691 return thread;
2692}
2693
2694VALUE
2695rb_thread_wakeup_alive(VALUE thread)
2696{
2697 rb_thread_t *target_th = rb_thread_ptr(thread);
2698 if (target_th->status == THREAD_KILLED) return Qnil;
2699
2700 rb_threadptr_ready(target_th);
2701
2702 if (target_th->status == THREAD_STOPPED ||
2703 target_th->status == THREAD_STOPPED_FOREVER) {
2704 target_th->status = THREAD_RUNNABLE;
2705 }
2706
2707 return thread;
2708}
2709
2710
2711/*
2712 * call-seq:
2713 * thr.run -> thr
2714 *
2715 * Wakes up +thr+, making it eligible for scheduling.
2716 *
2717 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2718 * sleep 0.1 while a.status!='sleep'
2719 * puts "Got here"
2720 * a.run
2721 * a.join
2722 *
2723 * This will produce:
2724 *
2725 * a
2726 * Got here
2727 * c
2728 *
2729 * See also the instance method #wakeup.
2730 */
2731
2732VALUE
2733rb_thread_run(VALUE thread)
2734{
2735 rb_thread_wakeup(thread);
2736    rb_thread_schedule();
2737    return thread;
2738}
2739
2740
2741VALUE
2742rb_thread_stop(void)
2743{
2744 if (rb_thread_alone()) {
2745        rb_raise(rb_eThreadError,
2746                 "stopping only thread\n\tnote: use sleep to stop forever");
2747 }
2748    rb_thread_sleep_deadly();
2749    return Qnil;
2750}
2751
2752/*
2753 * call-seq:
2754 * Thread.stop -> nil
2755 *
2756 * Stops execution of the current thread, putting it into a ``sleep'' state,
2757 * and schedules execution of another thread.
2758 *
2759 * a = Thread.new { print "a"; Thread.stop; print "c" }
2760 * sleep 0.1 while a.status!='sleep'
2761 * print "b"
2762 * a.run
2763 * a.join
2764 * #=> "abc"
2765 */
2766
2767static VALUE
2768thread_stop(VALUE _)
2769{
2770 return rb_thread_stop();
2771}
2772
2773/********************************************************************/
2774
2775VALUE
2776rb_thread_list(void)
2777{
2778 // TODO
2779 return rb_ractor_thread_list(GET_RACTOR());
2780}
2781
2782/*
2783 * call-seq:
2784 * Thread.list -> array
2785 *
2786 * Returns an array of Thread objects for all threads that are either runnable
2787 * or stopped.
2788 *
2789 * Thread.new { sleep(200) }
2790 * Thread.new { 1000000.times {|i| i*i } }
2791 * Thread.new { Thread.stop }
2792 * Thread.list.each {|t| p t}
2793 *
2794 * This will produce:
2795 *
2796 * #<Thread:0x401b3e84 sleep>
2797 * #<Thread:0x401b3f38 run>
2798 * #<Thread:0x401b3fb0 sleep>
2799 * #<Thread:0x401bdf4c run>
2800 */
2801
2802static VALUE
2803thread_list(VALUE _)
2804{
2805 return rb_thread_list();
2806}
2807
2808VALUE
2809rb_thread_current(void)
2810{
2811 return GET_THREAD()->self;
2812}
2813
2814/*
2815 * call-seq:
2816 * Thread.current -> thread
2817 *
2818 * Returns the currently executing thread.
2819 *
2820 * Thread.current #=> #<Thread:0x401bdf4c run>
2821 */
2822
2823static VALUE
2824thread_s_current(VALUE klass)
2825{
2826 return rb_thread_current();
2827}
2828
2829VALUE
2830rb_thread_main(void)
2831{
2832 return GET_RACTOR()->threads.main->self;
2833}
2834
2835/*
2836 * call-seq:
2837 * Thread.main -> thread
2838 *
2839 * Returns the main thread.
2840 */
2841
2842static VALUE
2843rb_thread_s_main(VALUE klass)
2844{
2845 return rb_thread_main();
2846}
2847
2848
2849/*
2850 * call-seq:
2851 * Thread.abort_on_exception -> true or false
2852 *
2853 * Returns the status of the global ``abort on exception'' condition.
2854 *
2855 * The default is +false+.
2856 *
2857 * When set to +true+, if any thread is aborted by an exception, the
2858 * raised exception will be re-raised in the main thread.
2859 *
2860 * Can also be specified by the global $DEBUG flag or command line option
2861 * +-d+.
2862 *
2863 * See also ::abort_on_exception=.
2864 *
2865 * There is also an instance level method to set this for a specific thread,
2866 * see #abort_on_exception.
2867 */
2868
2869static VALUE
2870rb_thread_s_abort_exc(VALUE _)
2871{
2872 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
2873}
2874
2875
2876/*
2877 * call-seq:
2878 * Thread.abort_on_exception= boolean -> true or false
2879 *
2880 * When set to +true+, if any thread is aborted by an exception, the
2881 * raised exception will be re-raised in the main thread.
2882 * Returns the new state.
2883 *
2884 * Thread.abort_on_exception = true
2885 * t1 = Thread.new do
2886 * puts "In new thread"
2887 * raise "Exception from thread"
2888 * end
2889 * sleep(1)
2890 * puts "not reached"
2891 *
2892 * This will produce:
2893 *
2894 * In new thread
2895 * prog.rb:4: Exception from thread (RuntimeError)
2896 * from prog.rb:2:in `initialize'
2897 * from prog.rb:2:in `new'
2898 * from prog.rb:2
2899 *
2900 * See also ::abort_on_exception.
2901 *
2902 * There is also an instance level method to set this for a specific thread,
2903 * see #abort_on_exception=.
2904 */
2905
2906static VALUE
2907rb_thread_s_abort_exc_set(VALUE self, VALUE val)
2908{
2909 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
2910 return val;
2911}
2912
2913
2914/*
2915 * call-seq:
2916 * thr.abort_on_exception -> true or false
2917 *
2918 * Returns the status of the thread-local ``abort on exception'' condition for
2919 * this +thr+.
2920 *
2921 * The default is +false+.
2922 *
2923 * See also #abort_on_exception=.
2924 *
2925 * There is also a class level method to set this for all threads, see
2926 * ::abort_on_exception.
2927 */
2928
2929static VALUE
2930rb_thread_abort_exc(VALUE thread)
2931{
2932 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
2933}
2934
2935
2936/*
2937 * call-seq:
2938 * thr.abort_on_exception= boolean -> true or false
2939 *
2940 * When set to +true+, if this +thr+ is aborted by an exception, the
2941 * raised exception will be re-raised in the main thread.
2942 *
2943 * See also #abort_on_exception.
2944 *
2945 * There is also a class level method to set this for all threads, see
2946 * ::abort_on_exception=.
2947 */
2948
2949static VALUE
2950rb_thread_abort_exc_set(VALUE thread, VALUE val)
2951{
2952 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
2953 return val;
2954}
2955
2956
2957/*
2958 * call-seq:
2959 * Thread.report_on_exception -> true or false
2960 *
2961 * Returns the status of the global ``report on exception'' condition.
2962 *
2963 * The default is +true+ since Ruby 2.5.
2964 *
2965 * All threads created when this flag is true will report
2966 * a message on $stderr if an exception kills the thread.
2967 *
2968 * Thread.new { 1.times { raise } }
2969 *
2970 * will produce this output on $stderr:
2971 *
2972 * #<Thread:...> terminated with exception (report_on_exception is true):
2973 * Traceback (most recent call last):
2974 * 2: from -e:1:in `block in <main>'
2975 * 1: from -e:1:in `times'
2976 *
2977 * This is done to catch errors in threads early.
2978 * In some cases, you might not want this output.
2979 * There are multiple ways to avoid the extra output:
2980 *
2981 * * If the exception is not intended, the best is to fix the cause of
2982 * the exception so it does not happen anymore.
2983 * * If the exception is intended, it might be better to rescue it closer to
2984 *   where it is raised rather than let it kill the Thread.
2985 * * If it is guaranteed the Thread will be joined with Thread#join or
2986 * Thread#value, then it is safe to disable this report with
2987 * <code>Thread.current.report_on_exception = false</code>
2988 * when starting the Thread.
2989 * However, this might handle the exception much later, or not at all
2990 * if the Thread is never joined due to the parent thread being blocked, etc.
2991 *
2992 * See also ::report_on_exception=.
2993 *
2994 * There is also an instance level method to set this for a specific thread,
2995 * see #report_on_exception=.
2996 *
2997 */
2998
2999static VALUE
3000rb_thread_s_report_exc(VALUE _)
3001{
3002 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3003}
3004
3005
3006/*
3007 * call-seq:
3008 * Thread.report_on_exception= boolean -> true or false
3009 *
3010 * Returns the new state.
3011 * When set to +true+, all threads created afterwards will inherit the
3012 * condition and report a message on $stderr if an exception kills a thread:
3013 *
3014 * Thread.report_on_exception = true
3015 * t1 = Thread.new do
3016 * puts "In new thread"
3017 * raise "Exception from thread"
3018 * end
3019 * sleep(1)
3020 * puts "In the main thread"
3021 *
3022 * This will produce:
3023 *
3024 * In new thread
3025 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3026 * Traceback (most recent call last):
3027 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3028 * In the main thread
3029 *
3030 * See also ::report_on_exception.
3031 *
3032 * There is also an instance level method to set this for a specific thread,
3033 * see #report_on_exception=.
3034 */
3035
3036static VALUE
3037rb_thread_s_report_exc_set(VALUE self, VALUE val)
3038{
3039 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3040 return val;
3041}
3042
3043
3044/*
3045 * call-seq:
3046 * Thread.ignore_deadlock -> true or false
3047 *
3048 * Returns the status of the global ``ignore deadlock'' condition.
3049 * The default is +false+, so that deadlock conditions are not ignored.
3050 *
3051 * See also ::ignore_deadlock=.
3052 *
3053 */
3054
3055static VALUE
3056rb_thread_s_ignore_deadlock(VALUE _)
3057{
3058 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3059}
3060
3061
3062/*
3063 * call-seq:
3064 * Thread.ignore_deadlock = boolean -> true or false
3065 *
3066 * Returns the new state.
3067 * When set to +true+, the VM will not check for deadlock conditions.
3068 * It is only useful to set this if your application can break a
3069 * deadlock condition via some other means, such as a signal.
3070 *
3071 * Thread.ignore_deadlock = true
3072 * queue = Thread::Queue.new
3073 *
3074 * trap(:SIGUSR1){queue.push "Received signal"}
3075 *
3076 * # raises fatal error unless ignoring deadlock
3077 * puts queue.pop
3078 *
3079 * See also ::ignore_deadlock.
3080 */
3081
3082static VALUE
3083rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3084{
3085 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3086 return val;
3087}
3088
3089
3090/*
3091 * call-seq:
3092 * thr.report_on_exception -> true or false
3093 *
3094 * Returns the status of the thread-local ``report on exception'' condition for
3095 * this +thr+.
3096 *
3097 * The default value when creating a Thread is the value of
3098 * the global flag Thread.report_on_exception.
3099 *
3100 * See also #report_on_exception=.
3101 *
3102 * There is also a class level method to set this for all new threads, see
3103 * ::report_on_exception=.
3104 */
3105
3106static VALUE
3107rb_thread_report_exc(VALUE thread)
3108{
3109 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3110}
3111
3112
3113/*
3114 * call-seq:
3115 * thr.report_on_exception= boolean -> true or false
3116 *
3117 * When set to +true+, a message is printed on $stderr if an exception
3118 * kills this +thr+. See ::report_on_exception for details.
3119 *
3120 * See also #report_on_exception.
3121 *
3122 * There is also a class level method to set this for all new threads, see
3123 * ::report_on_exception=.
3124 */
3125
3126static VALUE
3127rb_thread_report_exc_set(VALUE thread, VALUE val)
3128{
3129 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3130 return val;
3131}
3132
3133
3134/*
3135 * call-seq:
3136 * thr.group -> thgrp or nil
3137 *
3138 * Returns the ThreadGroup which contains the given thread.
3139 *
3140 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3141 */
3142
3143VALUE
3144rb_thread_group(VALUE thread)
3145{
3146 return rb_thread_ptr(thread)->thgroup;
3147}
3148
3149static const char *
3150thread_status_name(rb_thread_t *th, int detail)
3151{
3152 switch (th->status) {
3153 case THREAD_RUNNABLE:
3154 return th->to_kill ? "aborting" : "run";
3155 case THREAD_STOPPED_FOREVER:
3156 if (detail) return "sleep_forever";
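        /* fall through */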
3157 case THREAD_STOPPED:
3158 return "sleep";
3159 case THREAD_KILLED:
3160 return "dead";
3161 default:
3162 return "unknown";
3163 }
3164}
3165
3166static int
3167rb_threadptr_dead(rb_thread_t *th)
3168{
3169 return th->status == THREAD_KILLED;
3170}
3171
3172
3173/*
3174 * call-seq:
3175 * thr.status -> string, false or nil
3176 *
3177 * Returns the status of +thr+.
3178 *
3179 * [<tt>"sleep"</tt>]
3180 * Returned if this thread is sleeping or waiting on I/O
3181 * [<tt>"run"</tt>]
3182 * When this thread is executing
3183 * [<tt>"aborting"</tt>]
3184 * If this thread is aborting
3185 * [+false+]
3186 * When this thread is terminated normally
3187 * [+nil+]
3188 * If terminated with an exception.
3189 *
3190 * a = Thread.new { raise("die now") }
3191 * b = Thread.new { Thread.stop }
3192 * c = Thread.new { Thread.exit }
3193 * d = Thread.new { sleep }
3194 * d.kill #=> #<Thread:0x401b3678 aborting>
3195 * a.status #=> nil
3196 * b.status #=> "sleep"
3197 * c.status #=> false
3198 * d.status #=> "aborting"
3199 * Thread.current.status #=> "run"
3200 *
3201 * See also the instance methods #alive? and #stop?
3202 */
3203
3204static VALUE
3205rb_thread_status(VALUE thread)
3206{
3207 rb_thread_t *target_th = rb_thread_ptr(thread);
3208
3209 if (rb_threadptr_dead(target_th)) {
3210 if (!NIL_P(target_th->ec->errinfo) &&
3211 !FIXNUM_P(target_th->ec->errinfo)) {
3212 return Qnil;
3213 }
3214 else {
3215 return Qfalse;
3216 }
3217 }
3218 else {
3219 return rb_str_new2(thread_status_name(target_th, FALSE));
3220 }
3221}
3222
3223
3224/*
3225 * call-seq:
3226 * thr.alive? -> true or false
3227 *
3228 * Returns +true+ if +thr+ is running or sleeping.
3229 *
3230 * thr = Thread.new { }
3231 * thr.join #=> #<Thread:0x401b3fb0 dead>
3232 * Thread.current.alive? #=> true
3233 * thr.alive? #=> false
3234 *
3235 * See also #stop? and #status.
3236 */
3237
3238static VALUE
3239rb_thread_alive_p(VALUE thread)
3240{
3241 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3242}
3243
3244/*
3245 * call-seq:
3246 * thr.stop? -> true or false
3247 *
3248 * Returns +true+ if +thr+ is dead or sleeping.
3249 *
3250 * a = Thread.new { Thread.stop }
3251 * b = Thread.current
3252 * a.stop? #=> true
3253 * b.stop? #=> false
3254 *
3255 * See also #alive? and #status.
3256 */
3257
3258static VALUE
3259rb_thread_stop_p(VALUE thread)
3260{
3261 rb_thread_t *th = rb_thread_ptr(thread);
3262
3263 if (rb_threadptr_dead(th)) {
3264 return Qtrue;
3265 }
3266 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3267}
3268
3269/*
3270 * call-seq:
3271 * thr.name -> string
3272 *
3273 * Returns the name of the thread, or +nil+ if it has not been set.
3274 */
3275
3276static VALUE
3277rb_thread_getname(VALUE thread)
3278{
3279 return rb_thread_ptr(thread)->name;
3280}
3281
3282/*
3283 * call-seq:
3284 * thr.name=(name) -> string
3285 *
3286 * Sets the name of the thread to the given string.
3287 * On some platforms, it may also apply the name to the underlying pthread and/or the kernel.
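 *
 * For example (illustrative):
 *
 *    a = Thread.new { sleep }
 *    a.name = "worker"
 *    a.name #=> "worker"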
3288 */
3289
3290static VALUE
3291rb_thread_setname(VALUE thread, VALUE name)
3292{
3293 rb_thread_t *target_th = rb_thread_ptr(thread);
3294
3295 if (!NIL_P(name)) {
3296 rb_encoding *enc;
3297 StringValueCStr(name);
3298 enc = rb_enc_get(name);
3299 if (!rb_enc_asciicompat(enc)) {
3300 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3301 rb_enc_name(enc));
3302 }
3303 name = rb_str_new_frozen(name);
3304 }
3305 target_th->name = name;
3306 if (threadptr_initialized(target_th)) {
3307 native_set_another_thread_name(target_th->nt->thread_id, name);
3308 }
3309 return name;
3310}
3311
3312#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3313/*
3314 * call-seq:
3315 * thr.native_thread_id -> integer
3316 *
3317 * Return the native thread ID which is used by the Ruby thread.
3318 *
3319 * The ID depends on the OS (it is not the POSIX thread ID returned by pthread_self(3)):
3320 * * On Linux it is TID returned by gettid(2).
3321 * * On macOS it is the system-wide unique integral ID of thread returned
3322 * by pthread_threadid_np(3).
3323 * * On FreeBSD it is the unique integral ID of the thread returned by
3324 * pthread_getthreadid_np(3).
3325 * * On Windows it is the thread identifier returned by GetThreadId().
3326 * * On other platforms, it raises NotImplementedError.
3327 *
3328 * NOTE:
3329 * If the thread is not yet associated with, or has already been dissociated
3330 * from, a native thread, it returns _nil_.
3331 * If the Ruby implementation uses an M:N thread model, the ID may change
3332 * depending on the timing.
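 *
 * For example (the value is platform-dependent; illustrative only):
 *
 *    Thread.current.native_thread_id #=> 28653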
3333 */
3334
3335static VALUE
3336rb_thread_native_thread_id(VALUE thread)
3337{
3338 rb_thread_t *target_th = rb_thread_ptr(thread);
3339 if (rb_threadptr_dead(target_th)) return Qnil;
3340 return native_thread_native_thread_id(target_th);
3341}
3342#else
3343# define rb_thread_native_thread_id rb_f_notimplement
3344#endif
3345
3346/*
3347 * call-seq:
3348 * thr.to_s -> string
3349 *
3350 * Dump the name, id, and status of _thr_ to a string.
3351 */
3352
3353static VALUE
3354rb_thread_to_s(VALUE thread)
3355{
3356 VALUE cname = rb_class_path(rb_obj_class(thread));
3357 rb_thread_t *target_th = rb_thread_ptr(thread);
3358 const char *status;
3359 VALUE str, loc;
3360
3361 status = thread_status_name(target_th, TRUE);
3362 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3363 if (!NIL_P(target_th->name)) {
3364 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3365 }
3366 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3367 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3368 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3369 }
3370 rb_str_catf(str, " %s>", status);
3371
3372 return str;
3373}
3374
3375/* variables for recursive traversals */
3376#define recursive_key id__recursive_key__
3377
3378static VALUE
3379threadptr_local_aref(rb_thread_t *th, ID id)
3380{
3381 if (id == recursive_key) {
3382 return th->ec->local_storage_recursive_hash;
3383 }
3384 else {
3385 VALUE val;
3386 struct rb_id_table *local_storage = th->ec->local_storage;
3387
3388 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3389 return val;
3390 }
3391 else {
3392 return Qnil;
3393 }
3394 }
3395}
3396
3397VALUE
3398rb_thread_local_aref(VALUE thread, ID id)
3399{
3400 return threadptr_local_aref(rb_thread_ptr(thread), id);
3401}
3402
3403/*
3404 * call-seq:
3405 * thr[sym] -> obj or nil
3406 *
3407 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3408 * if not explicitly inside a Fiber), using either a symbol or a string name.
3409 * If the specified variable does not exist, returns +nil+.
3410 *
3411 * [
3412 * Thread.new { Thread.current["name"] = "A" },
3413 * Thread.new { Thread.current[:name] = "B" },
3414 * Thread.new { Thread.current["name"] = "C" }
3415 * ].each do |th|
3416 * th.join
3417 * puts "#{th.inspect}: #{th[:name]}"
3418 * end
3419 *
3420 * This will produce:
3421 *
3422 * #<Thread:0x00000002a54220 dead>: A
3423 * #<Thread:0x00000002a541a8 dead>: B
3424 * #<Thread:0x00000002a54130 dead>: C
3425 *
3426 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3427 * This confusion did not exist in Ruby 1.8 because
3428 * fibers have only been available since Ruby 1.9.
3429 * Ruby 1.9 chose to make these methods fiber-local in order to preserve
3430 * the following idiom for dynamic scope.
3431 *
3432 * def meth(newvalue)
3433 * begin
3434 * oldvalue = Thread.current[:name]
3435 * Thread.current[:name] = newvalue
3436 * yield
3437 * ensure
3438 * Thread.current[:name] = oldvalue
3439 * end
3440 * end
3441 *
3442 * The idiom may not work as dynamic scope if the methods are thread-local
3443 * and a given block switches fibers.
3444 *
3445 * f = Fiber.new {
3446 * meth(1) {
3447 * Fiber.yield
3448 * }
3449 * }
3450 * meth(2) {
3451 * f.resume
3452 * }
3453 * f.resume
3454 * p Thread.current[:name]
3455 * #=> nil if fiber-local
3456 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3457 *
3458 * For thread-local variables, please see #thread_variable_get and
3459 * #thread_variable_set.
3460 *
3461 */
3462
3463static VALUE
3464rb_thread_aref(VALUE thread, VALUE key)
3465{
3466 ID id = rb_check_id(&key);
3467 if (!id) return Qnil;
3468 return rb_thread_local_aref(thread, id);
3469}
3470
3471/*
3472 * call-seq:
3473 * thr.fetch(sym) -> obj
3474 * thr.fetch(sym) { } -> obj
3475 * thr.fetch(sym, default) -> obj
3476 *
3477 * Returns a fiber-local for the given key. If the key can't be
3478 * found, there are several options: With no other arguments, it will
3479 * raise a KeyError exception; if <i>default</i> is given, then that
3480 * will be returned; if the optional code block is specified, then
3481 * that will be run and its result returned. See Thread#[] and
3482 * Hash#fetch.
3483 */
3484static VALUE
3485rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3486{
3487 VALUE key, val;
3488 ID id;
3489 rb_thread_t *target_th = rb_thread_ptr(self);
3490 int block_given;
3491
3492 rb_check_arity(argc, 1, 2);
3493 key = argv[0];
3494
3495 block_given = rb_block_given_p();
3496 if (block_given && argc == 2) {
3497 rb_warn("block supersedes default value argument");
3498 }
3499
3500 id = rb_check_id(&key);
3501
3502 if (id == recursive_key) {
3503 return target_th->ec->local_storage_recursive_hash;
3504 }
3505 else if (id && target_th->ec->local_storage &&
3506 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3507 return val;
3508 }
3509 else if (block_given) {
3510 return rb_yield(key);
3511 }
3512 else if (argc == 1) {
3513 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3514 }
3515 else {
3516 return argv[1];
3517 }
3518}
3519
3520static VALUE
3521threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3522{
3523 if (id == recursive_key) {
3524 th->ec->local_storage_recursive_hash = val;
3525 return val;
3526 }
3527 else {
3528 struct rb_id_table *local_storage = th->ec->local_storage;
3529
3530 if (NIL_P(val)) {
3531 if (!local_storage) return Qnil;
3532 rb_id_table_delete(local_storage, id);
3533 return Qnil;
3534 }
3535 else {
3536 if (local_storage == NULL) {
3537 th->ec->local_storage = local_storage = rb_id_table_create(0);
3538 }
3539 rb_id_table_insert(local_storage, id, val);
3540 return val;
3541 }
3542 }
3543}
3544
3545VALUE
3546rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3547{
3548 if (OBJ_FROZEN(thread)) {
3549 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3550 }
3551
3552 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3553}
3554
3555/*
3556 * call-seq:
3557 * thr[sym] = obj -> obj
3558 *
3559 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3560 * using either a symbol or a string.
3561 *
3562 * See also Thread#[].
3563 *
3564 * For thread-local variables, please see #thread_variable_set and
3565 * #thread_variable_get.
3566 */
3567
3568static VALUE
3569rb_thread_aset(VALUE self, VALUE id, VALUE val)
3570{
3571 return rb_thread_local_aset(self, rb_to_id(id), val);
3572}
3573
3574/*
3575 * call-seq:
3576 * thr.thread_variable_get(key) -> obj or nil
3577 *
3578 * Returns the value of a thread local variable that has been set. Note that
3579 * these are different from fiber local values.  For fiber local values,
3580 * please see Thread#[] and Thread#[]=.
3581 *
3582 * Thread local values are carried along with threads, and do not respect
3583 * fibers. For example:
3584 *
3585 * Thread.new {
3586 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3587 * Thread.current["foo"] = "bar" # set a fiber local
3588 *
3589 * Fiber.new {
3590 * Fiber.yield [
3591 * Thread.current.thread_variable_get("foo"), # get the thread local
3592 * Thread.current["foo"], # get the fiber local
3593 * ]
3594 * }.resume
3595 * }.join.value # => ['bar', nil]
3596 *
3597 * The value "bar" is returned for the thread local, whereas nil is returned
3598 * for the fiber local. The fiber is executed in the same thread, so the
3599 * thread local values are available.
3600 */
3601
3602static VALUE
3603rb_thread_variable_get(VALUE thread, VALUE key)
3604{
3605 VALUE locals;
3606
3607 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3608 return Qnil;
3609 }
3610 locals = rb_thread_local_storage(thread);
3611 return rb_hash_aref(locals, rb_to_symbol(key));
3612}
3613
3614/*
3615 * call-seq:
3616 * thr.thread_variable_set(key, value)
3617 *
3618 * Sets a thread local with +key+ to +value+. Note that these are local to
3619 * threads, and not to fibers. Please see Thread#thread_variable_get and
3620 * Thread#[] for more information.
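 *
 * For example (illustrative):
 *
 *    Thread.current.thread_variable_set(:debug, true)
 *    Thread.current.thread_variable_get(:debug) #=> true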
3621 */
3622
3623static VALUE
3624rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3625{
3626 VALUE locals;
3627
3628 if (OBJ_FROZEN(thread)) {
3629 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3630 }
3631
3632 locals = rb_thread_local_storage(thread);
3633 return rb_hash_aset(locals, rb_to_symbol(key), val);
3634}
3635
3636/*
3637 * call-seq:
3638 * thr.key?(sym) -> true or false
3639 *
3640 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3641 * variable.
3642 *
3643 * me = Thread.current
3644 * me[:oliver] = "a"
3645 * me.key?(:oliver) #=> true
3646 * me.key?(:stanley) #=> false
3647 */
3648
3649static VALUE
3650rb_thread_key_p(VALUE self, VALUE key)
3651{
3652 VALUE val;
3653 ID id = rb_check_id(&key);
3654 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3655
3656 if (!id || local_storage == NULL) {
3657 return Qfalse;
3658 }
3659 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
3660}
3661
3662static enum rb_id_table_iterator_result
3663thread_keys_i(ID key, VALUE value, void *ary)
3664{
3665 rb_ary_push((VALUE)ary, ID2SYM(key));
3666 return ID_TABLE_CONTINUE;
3667}
3668
3669int
3670rb_thread_alone(void)
3671{
3672 // TODO
3673 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
3674}
3675
3676/*
3677 * call-seq:
3678 * thr.keys -> array
3679 *
3680 * Returns an array of the names of the fiber-local variables (as Symbols).
3681 *
3682 * thr = Thread.new do
3683 * Thread.current[:cat] = 'meow'
3684 * Thread.current["dog"] = 'woof'
3685 * end
3686 * thr.join #=> #<Thread:0x401b3f10 dead>
3687 * thr.keys #=> [:dog, :cat]
3688 */
3689
3690static VALUE
3691rb_thread_keys(VALUE self)
3692{
3693 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3694 VALUE ary = rb_ary_new();
3695
3696 if (local_storage) {
3697 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
3698 }
3699 return ary;
3700}
3701
3702static int
3703keys_i(VALUE key, VALUE value, VALUE ary)
3704{
3705 rb_ary_push(ary, key);
3706 return ST_CONTINUE;
3707}
3708
3709/*
3710 * call-seq:
3711 * thr.thread_variables -> array
3712 *
3713 * Returns an array of the names of the thread-local variables (as Symbols).
3714 *
3715 * thr = Thread.new do
3716 * Thread.current.thread_variable_set(:cat, 'meow')
3717 * Thread.current.thread_variable_set("dog", 'woof')
3718 * end
3719 * thr.join #=> #<Thread:0x401b3f10 dead>
3720 * thr.thread_variables #=> [:dog, :cat]
3721 *
3722 * Note that these are not fiber local variables. Please see Thread#[] and
3723 * Thread#thread_variable_get for more details.
3724 */
3725
3726static VALUE
3727rb_thread_variables(VALUE thread)
3728{
3729 VALUE locals;
3730 VALUE ary;
3731
3732 ary = rb_ary_new();
3733 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3734 return ary;
3735 }
3736 locals = rb_thread_local_storage(thread);
3737 rb_hash_foreach(locals, keys_i, ary);
3738
3739 return ary;
3740}
3741
3742/*
3743 * call-seq:
3744 * thr.thread_variable?(key) -> true or false
3745 *
3746 * Returns +true+ if the given string (or symbol) exists as a thread-local
3747 * variable.
3748 *
3749 * me = Thread.current
3750 * me.thread_variable_set(:oliver, "a")
3751 * me.thread_variable?(:oliver) #=> true
3752 * me.thread_variable?(:stanley) #=> false
3753 *
3754 * Note that these are not fiber local variables. Please see Thread#[] and
3755 * Thread#thread_variable_get for more details.
3756 */
3757
3758static VALUE
3759rb_thread_variable_p(VALUE thread, VALUE key)
3760{
3761 VALUE locals;
3762
3763 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3764 return Qfalse;
3765 }
3766 locals = rb_thread_local_storage(thread);
3767
3768 return RBOOL(rb_hash_lookup(locals, rb_to_symbol(key)) != Qnil);
3769}
3770
3771/*
3772 * call-seq:
3773 * thr.priority -> integer
3774 *
3775 * Returns the priority of <i>thr</i>. The default is inherited from the
3776 * current thread which created the new thread, or zero for the
3777 * initial main thread; higher-priority threads will run more frequently
3778 * than lower-priority threads (but lower-priority threads can also run).
3779 *
3780 * This is just a hint for the Ruby thread scheduler.  It may be ignored on
3781 * some platforms.
3782 *
3783 * Thread.current.priority #=> 0
3784 */
3785
3786static VALUE
3787rb_thread_priority(VALUE thread)
3788{
3789 return INT2NUM(rb_thread_ptr(thread)->priority);
3790}
3791
3792
3793/*
3794 * call-seq:
3795 * thr.priority= integer -> thr
3796 *
3797 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3798 * will run more frequently than lower-priority threads (but lower-priority
3799 * threads can also run).
3800 *
3801 * This is just a hint for the Ruby thread scheduler.  It may be ignored on
3802 * some platforms.
3803 *
3804 * count1 = count2 = 0
3805 * a = Thread.new do
3806 * loop { count1 += 1 }
3807 * end
3808 * a.priority = -1
3809 *
3810 * b = Thread.new do
3811 * loop { count2 += 1 }
3812 * end
3813 * b.priority = -2
3814 * sleep 1 #=> 1
3815 * count1 #=> 622504
3816 * count2 #=> 5832
3817 */
3818
3819static VALUE
3820rb_thread_priority_set(VALUE thread, VALUE prio)
3821{
3822 rb_thread_t *target_th = rb_thread_ptr(thread);
3823 int priority;
3824
3825#if USE_NATIVE_THREAD_PRIORITY
3826 target_th->priority = NUM2INT(prio);
3827 native_thread_apply_priority(th);
3828#else
3829 priority = NUM2INT(prio);
3830 if (priority > RUBY_THREAD_PRIORITY_MAX) {
3831 priority = RUBY_THREAD_PRIORITY_MAX;
3832 }
3833 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3834 priority = RUBY_THREAD_PRIORITY_MIN;
3835 }
3836 target_th->priority = (int8_t)priority;
3837#endif
3838 return INT2NUM(target_th->priority);
3839}
3840
3841/* for IO */
3842
3843#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3844
3845/*
3846 * several Unix platforms support file descriptors bigger than FD_SETSIZE
3847 * in the select(2) system call.
3848 *
3849 * - Linux 2.2.12 (?)
3850 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3851 * select(2) documents how to allocate fd_set dynamically.
3852 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3853 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3854 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3855 * select(2) documents how to allocate fd_set dynamically.
3856 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3857 * - Solaris 8 has select_large_fdset
3858 * - Mac OS X 10.7 (Lion)
3859 * select(2) returns EINVAL if nfds is greater than FD_SET_SIZE and
3860 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3861 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
3862 *
3863 * When fd_set is not big enough to hold big file descriptors,
3864 * it should be allocated dynamically.
3865 * Note that this assumes fd_set is structured as a bitmap.
3866 *
3867 * rb_fd_init allocates the memory.
3868 * rb_fd_term frees the memory.
3869 * rb_fd_set may re-allocate the bitmap.
3870 *
3871 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
3872 */
3873
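/*
 * Typical lifecycle under this implementation: rb_fd_init, then rb_fd_set
 * for each descriptor of interest (growing the bitmap as needed),
 * rb_fd_select to wait, and finally rb_fd_term to release the memory.
 */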
3874void
3875rb_fd_init(rb_fdset_t *fds)
3876{
3877 fds->maxfd = 0;
3878 fds->fdset = ALLOC(fd_set);
3879 FD_ZERO(fds->fdset);
3880}
3881
3882void
3883rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
3884{
3885 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3886
3887 if (size < sizeof(fd_set))
3888 size = sizeof(fd_set);
3889 dst->maxfd = src->maxfd;
3890 dst->fdset = xmalloc(size);
3891 memcpy(dst->fdset, src->fdset, size);
3892}
3893
3894void
3895rb_fd_term(rb_fdset_t *fds)
3896{
3897 if (fds->fdset) xfree(fds->fdset);
3898 fds->maxfd = 0;
3899 fds->fdset = 0;
3900}
3901
3902void
3903rb_fd_zero(rb_fdset_t *fds)
3904{
3905 if (fds->fdset)
3906 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
3907}
3908
3909static void
3910rb_fd_resize(int n, rb_fdset_t *fds)
3911{
3912 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
3913 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
3914
3915 if (m < sizeof(fd_set)) m = sizeof(fd_set);
3916 if (o < sizeof(fd_set)) o = sizeof(fd_set);
3917
3918 if (m > o) {
3919 fds->fdset = xrealloc(fds->fdset, m);
3920 memset((char *)fds->fdset + o, 0, m - o);
3921 }
3922 if (n >= fds->maxfd) fds->maxfd = n + 1;
3923}
3924
3925void
3926rb_fd_set(int n, rb_fdset_t *fds)
3927{
3928 rb_fd_resize(n, fds);
3929 FD_SET(n, fds->fdset);
3930}
3931
3932void
3933rb_fd_clr(int n, rb_fdset_t *fds)
3934{
3935 if (n >= fds->maxfd) return;
3936 FD_CLR(n, fds->fdset);
3937}
3938
3939int
3940rb_fd_isset(int n, const rb_fdset_t *fds)
3941{
3942 if (n >= fds->maxfd) return 0;
3943 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
3944}
3945
3946void
3947rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
3948{
3949 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
3950
3951 if (size < sizeof(fd_set)) size = sizeof(fd_set);
3952 dst->maxfd = max;
3953 dst->fdset = xrealloc(dst->fdset, size);
3954 memcpy(dst->fdset, src, size);
3955}
3956
3957void
3958rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
3959{
3960 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3961
3962 if (size < sizeof(fd_set))
3963 size = sizeof(fd_set);
3964 dst->maxfd = src->maxfd;
3965 dst->fdset = xrealloc(dst->fdset, size);
3966 memcpy(dst->fdset, src->fdset, size);
3967}
3968
3969int
3970rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
3971{
3972 fd_set *r = NULL, *w = NULL, *e = NULL;
3973 if (readfds) {
3974 rb_fd_resize(n - 1, readfds);
3975 r = rb_fd_ptr(readfds);
3976 }
3977 if (writefds) {
3978 rb_fd_resize(n - 1, writefds);
3979 w = rb_fd_ptr(writefds);
3980 }
3981 if (exceptfds) {
3982 rb_fd_resize(n - 1, exceptfds);
3983 e = rb_fd_ptr(exceptfds);
3984 }
3985 return select(n, r, w, e, timeout);
3986}
3987
3988#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
3989
3990#undef FD_ZERO
3991#undef FD_SET
3992#undef FD_CLR
3993#undef FD_ISSET
3994
3995#define FD_ZERO(f) rb_fd_zero(f)
3996#define FD_SET(i, f) rb_fd_set((i), (f))
3997#define FD_CLR(i, f) rb_fd_clr((i), (f))
3998#define FD_ISSET(i, f) rb_fd_isset((i), (f))
3999
4000#elif defined(_WIN32)
4001
4002void
4003rb_fd_init(rb_fdset_t *set)
4004{
4005 set->capa = FD_SETSIZE;
4006 set->fdset = ALLOC(fd_set);
4007 FD_ZERO(set->fdset);
4008}
4009
4010void
4011rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4012{
4013 rb_fd_init(dst);
4014 rb_fd_dup(dst, src);
4015}
4016
4017void
4018rb_fd_term(rb_fdset_t *set)
4019{
4020 xfree(set->fdset);
4021 set->fdset = NULL;
4022 set->capa = 0;
4023}
4024
4025void
4026rb_fd_set(int fd, rb_fdset_t *set)
4027{
4028 unsigned int i;
4029 SOCKET s = rb_w32_get_osfhandle(fd);
4030
4031 for (i = 0; i < set->fdset->fd_count; i++) {
4032 if (set->fdset->fd_array[i] == s) {
4033 return;
4034 }
4035 }
4036 if (set->fdset->fd_count >= (unsigned)set->capa) {
4037 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4038 set->fdset =
4039 rb_xrealloc_mul_add(
4040 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4041 }
4042 set->fdset->fd_array[set->fdset->fd_count++] = s;
4043}
4044
4045#undef FD_ZERO
4046#undef FD_SET
4047#undef FD_CLR
4048#undef FD_ISSET
4049
4050#define FD_ZERO(f) rb_fd_zero(f)
4051#define FD_SET(i, f) rb_fd_set((i), (f))
4052#define FD_CLR(i, f) rb_fd_clr((i), (f))
4053#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4054
4055#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4056
4057#endif
4058
4059#ifndef rb_fd_no_init
4060#define rb_fd_no_init(fds) (void)(fds)
4061#endif
4062
4063static int
4064wait_retryable(int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4065{
4066 if (*result < 0) {
4067 switch (errnum) {
4068 case EINTR:
4069#ifdef ERESTART
4070 case ERESTART:
4071#endif
4072 *result = 0;
4073 if (rel && hrtime_update_expire(rel, end)) {
4074 *rel = 0;
4075 }
4076 return TRUE;
4077 }
4078 return FALSE;
4079 }
4080 else if (*result == 0) {
4081 /* check for spurious wakeup */
4082 if (rel) {
4083 return !hrtime_update_expire(rel, end);
4084 }
4085 return TRUE;
4086 }
4087 return FALSE;
4088}
4090struct select_set {
4091 int max;
4092 int sigwait_fd;
4093 rb_thread_t *th;
4094 rb_fdset_t *rset;
4095 rb_fdset_t *wset;
4096 rb_fdset_t *eset;
4097 rb_fdset_t orig_rset;
4098 rb_fdset_t orig_wset;
4099 rb_fdset_t orig_eset;
4100 struct timeval *timeout;
4101};
4102
4103static VALUE
4104select_set_free(VALUE p)
4105{
4106 struct select_set *set = (struct select_set *)p;
4107
4108 if (set->sigwait_fd >= 0) {
4109 rb_sigwait_fd_put(set->th, set->sigwait_fd);
4110 rb_sigwait_fd_migrate(set->th->vm);
4111 }
4112
4113 rb_fd_term(&set->orig_rset);
4114 rb_fd_term(&set->orig_wset);
4115 rb_fd_term(&set->orig_eset);
4116
4117 return Qfalse;
4118}
4119
4120static const rb_hrtime_t *
4121sigwait_timeout(rb_thread_t *th, int sigwait_fd, const rb_hrtime_t *orig,
4122 int *drained_p)
4123{
4124 static const rb_hrtime_t quantum = TIME_QUANTUM_USEC * 1000;
4125
4126 if (sigwait_fd >= 0 && (!ubf_threads_empty() || BUSY_WAIT_SIGNALS)) {
4127 *drained_p = check_signals_nogvl(th, sigwait_fd);
4128 if (!orig || *orig > quantum)
4129 return &quantum;
4130 }
4131
4132 return orig;
4133}
4134
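/*
 * If the select()/ppoll() result includes a hit on the sigwait fd,
 * consume that hit (decrement the ready count) and yield sigwait_fd so
 * the caller can drain it; otherwise yield -1.
 */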
4135#define sigwait_signals_fd(result, cond, sigwait_fd) \
4136 (result > 0 && (cond) ? (result--, (sigwait_fd)) : -1)
4137
4138static VALUE
4139do_select(VALUE p)
4140{
4141 struct select_set *set = (struct select_set *)p;
4142 int result = 0;
4143 int lerrno;
4144 rb_hrtime_t *to, rel, end = 0;
4145
4146 timeout_prepare(&to, &rel, &end, set->timeout);
4147#define restore_fdset(dst, src) \
4148 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4149#define do_select_update() \
4150 (restore_fdset(set->rset, &set->orig_rset), \
4151 restore_fdset(set->wset, &set->orig_wset), \
4152 restore_fdset(set->eset, &set->orig_eset), \
4153 TRUE)
4154
4155 do {
4156 int drained;
4157 lerrno = 0;
4158
4159 BLOCKING_REGION(set->th, {
4160 const rb_hrtime_t *sto;
4161 struct timeval tv;
4162
4163 sto = sigwait_timeout(set->th, set->sigwait_fd, to, &drained);
4164 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4165 result = native_fd_select(set->max, set->rset, set->wset,
4166 set->eset,
4167 rb_hrtime2timeval(&tv, sto), set->th);
4168 if (result < 0) lerrno = errno;
4169 }
4170 }, set->sigwait_fd >= 0 ? ubf_sigwait : ubf_select, set->th, TRUE);
4171
4172 if (set->sigwait_fd >= 0) {
4173 int fd = sigwait_signals_fd(result,
4174 rb_fd_isset(set->sigwait_fd, set->rset),
4175 set->sigwait_fd);
4176 (void)check_signals_nogvl(set->th, fd);
4177 }
4178
4179 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4180 } while (wait_retryable(&result, lerrno, to, end) && do_select_update());
4181
4182 if (result < 0) {
4183 errno = lerrno;
4184 }
4185
4186 return (VALUE)result;
4187}
4188
4189static rb_fdset_t *
4190init_set_fd(int fd, rb_fdset_t *fds)
4191{
4192 if (fd < 0) {
4193 return 0;
4194 }
4195 rb_fd_init(fds);
4196 rb_fd_set(fd, fds);
4197
4198 return fds;
4199}
4200
4201int
4202rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4203 struct timeval *timeout)
4204{
4205 struct select_set set;
4206
4207 set.th = GET_THREAD();
4208 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4209 set.max = max;
4210 set.rset = read;
4211 set.wset = write;
4212 set.eset = except;
4213 set.timeout = timeout;
4214
4215 if (!set.rset && !set.wset && !set.eset) {
4216 if (!timeout) {
4217 rb_thread_sleep_forever();
4218 return 0;
4219 }
4220 rb_thread_wait_for(*timeout);
4221 return 0;
4222 }
4223
4224 set.sigwait_fd = rb_sigwait_fd_get(set.th);
4225 if (set.sigwait_fd >= 0) {
4226 if (set.rset)
4227 rb_fd_set(set.sigwait_fd, set.rset);
4228 else
4229 set.rset = init_set_fd(set.sigwait_fd, &set.orig_rset);
4230 if (set.sigwait_fd >= set.max) {
4231 set.max = set.sigwait_fd + 1;
4232 }
4233 }
4234#define fd_init_copy(f) do { \
4235 if (set.f) { \
4236 rb_fd_resize(set.max - 1, set.f); \
4237 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4238 rb_fd_init_copy(&set.orig_##f, set.f); \
4239 } \
4240 } \
4241 else { \
4242 rb_fd_no_init(&set.orig_##f); \
4243 } \
4244 } while (0)
4245 fd_init_copy(rset);
4246 fd_init_copy(wset);
4247 fd_init_copy(eset);
4248#undef fd_init_copy
4249
4250 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4251}
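/*
 * Example: a minimal sketch of calling rb_thread_fd_select() from C to
 * wait until fd (an assumed open descriptor) becomes readable, with a
 * one-second timeout:
 *
 *   rb_fdset_t rfds;
 *   struct timeval tv = { 1, 0 };
 *   int n;
 *
 *   rb_fd_init(&rfds);
 *   rb_fd_set(fd, &rfds);
 *   n = rb_thread_fd_select(fd + 1, &rfds, NULL, NULL, &tv);
 *   rb_fd_term(&rfds);
 *   // n > 0: fd is ready; n == 0: timed out; n < 0: error, see errno
 */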
4252
4253#ifdef USE_POLL
4254
4255/* The same as the Linux kernel. TODO: make a platform-independent definition. */
4256#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4257#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4258#define POLLEX_SET (POLLPRI)
4259
4260#ifndef POLLERR_SET /* defined for FreeBSD for now */
4261# define POLLERR_SET (0)
4262#endif
4263
4264/*
4265 * Returns a mask of ready RB_WAITFD_* events, or -1 on error (errno set).
4266 */
4267int
4268rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4269{
4270 struct pollfd fds[2];
4271 int result = 0;
4272 int drained;
4273 nfds_t nfds;
4274 rb_unblock_function_t *ubf;
4275 struct waiting_fd wfd;
4276 int state;
4277 volatile int lerrno;
4278
4279 wfd.th = GET_THREAD();
4280 wfd.fd = fd;
4281
4282 RB_VM_LOCK_ENTER();
4283 {
4284 ccan_list_add(&wfd.th->vm->waiting_fds, &wfd.wfd_node);
4285 }
4286 RB_VM_LOCK_LEAVE();
4287
4288 EC_PUSH_TAG(wfd.th->ec);
4289 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4290 rb_hrtime_t *to, rel, end = 0;
4291 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4292 timeout_prepare(&to, &rel, &end, timeout);
4293 fds[0].fd = fd;
4294 fds[0].events = (short)events;
4295 fds[0].revents = 0;
4296 do {
4297 fds[1].fd = rb_sigwait_fd_get(wfd.th);
4298
4299 if (fds[1].fd >= 0) {
4300 fds[1].events = POLLIN;
4301 fds[1].revents = 0;
4302 nfds = 2;
4303 ubf = ubf_sigwait;
4304 }
4305 else {
4306 nfds = 1;
4307 ubf = ubf_select;
4308 }
4309
4310 lerrno = 0;
4311 BLOCKING_REGION(wfd.th, {
4312 const rb_hrtime_t *sto;
4313 struct timespec ts;
4314
4315 sto = sigwait_timeout(wfd.th, fds[1].fd, to, &drained);
4316 if (!RUBY_VM_INTERRUPTED(wfd.th->ec)) {
4317 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, sto), 0);
4318 if (result < 0) lerrno = errno;
4319 }
4320 }, ubf, wfd.th, TRUE);
4321
4322 if (fds[1].fd >= 0) {
4323 int fd1 = sigwait_signals_fd(result, fds[1].revents, fds[1].fd);
4324 (void)check_signals_nogvl(wfd.th, fd1);
4325 rb_sigwait_fd_put(wfd.th, fds[1].fd);
4326 rb_sigwait_fd_migrate(wfd.th->vm);
4327 }
4328 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4329 } while (wait_retryable(&result, lerrno, to, end));
4330 }
4331 EC_POP_TAG();
4332
4333 RB_VM_LOCK_ENTER();
4334 {
4335 ccan_list_del(&wfd.wfd_node);
4336 }
4337 RB_VM_LOCK_LEAVE();
4338
4339 if (state) {
4340 EC_JUMP_TAG(wfd.th->ec, state);
4341 }
4342
4343 if (result < 0) {
4344 errno = lerrno;
4345 return -1;
4346 }
4347
4348 if (fds[0].revents & POLLNVAL) {
4349 errno = EBADF;
4350 return -1;
4351 }
4352
4353 /*
4354 * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits,
4355 * so we need to map them back to the RB_WAITFD_* flags.
4356 */
4357 result = 0;
4358 if (fds[0].revents & POLLIN_SET)
4359 result |= RB_WAITFD_IN;
4360 if (fds[0].revents & POLLOUT_SET)
4361 result |= RB_WAITFD_OUT;
4362 if (fds[0].revents & POLLEX_SET)
4363 result |= RB_WAITFD_PRI;
4364
4365 /* all requested events are ready if there is an error */
4366 if (fds[0].revents & POLLERR_SET)
4367 result |= events;
4368
4369 return result;
4370}
4371#else /* ! USE_POLL - implement rb_thread_wait_for_single_fd() using select() */
4372struct select_args {
4373 union {
4374 int fd;
4375 int error;
4376 } as;
4377 rb_fdset_t *read;
4378 rb_fdset_t *write;
4379 rb_fdset_t *except;
4380 struct waiting_fd wfd;
4381 struct timeval *tv;
4382};
4383
4384static VALUE
4385select_single(VALUE ptr)
4386{
4387 struct select_args *args = (struct select_args *)ptr;
4388 int r;
4389
4390 r = rb_thread_fd_select(args->as.fd + 1,
4391 args->read, args->write, args->except, args->tv);
4392 if (r == -1)
4393 args->as.error = errno;
4394 if (r > 0) {
4395 r = 0;
4396 if (args->read && rb_fd_isset(args->as.fd, args->read))
4397 r |= RB_WAITFD_IN;
4398 if (args->write && rb_fd_isset(args->as.fd, args->write))
4399 r |= RB_WAITFD_OUT;
4400 if (args->except && rb_fd_isset(args->as.fd, args->except))
4401 r |= RB_WAITFD_PRI;
4402 }
4403 return (VALUE)r;
4404}
4405
4406static VALUE
4407select_single_cleanup(VALUE ptr)
4408{
4409 struct select_args *args = (struct select_args *)ptr;
4410
4411 RB_VM_LOCK_ENTER();
4412 {
4413 ccan_list_del(&args->wfd.wfd_node);
4414 }
4415 RB_VM_LOCK_LEAVE();
4416 if (args->read) rb_fd_term(args->read);
4417 if (args->write) rb_fd_term(args->write);
4418 if (args->except) rb_fd_term(args->except);
4419
4420 return (VALUE)-1;
4421}
4422
4423int
4424rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4425{
4426 rb_fdset_t rfds, wfds, efds;
4427 struct select_args args;
4428 int r;
4429 VALUE ptr = (VALUE)&args;
4430
4431 args.as.fd = fd;
4432 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4433 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4434 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4435 args.tv = timeout;
4436 args.wfd.fd = fd;
4437 args.wfd.th = GET_THREAD();
4438
4439 RB_VM_LOCK_ENTER();
4440 {
4441 ccan_list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);
4442 }
4443 RB_VM_LOCK_LEAVE();
4444
4445 r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4446 if (r == -1)
4447 errno = args.as.error;
4448
4449 return r;
4450}
4451#endif /* ! USE_POLL */
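/*
 * Example: a minimal sketch of waiting on a single descriptor with
 * rb_thread_wait_for_single_fd() (a NULL timeout blocks indefinitely;
 * fd is an assumed open descriptor):
 *
 *   int revents = rb_thread_wait_for_single_fd(fd, RB_WAITFD_IN, NULL);
 *   if (revents < 0) rb_sys_fail(0);
 *   if (revents & RB_WAITFD_IN) { ... fd is readable ... }
 */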
4452
4453/*
4454 * for GC
4455 */
4456
4457#ifdef USE_CONSERVATIVE_STACK_END
4458void
4459rb_gc_set_stack_end(VALUE **stack_end_p)
4460{
4461 VALUE stack_end;
4462 *stack_end_p = &stack_end;
4463}
4464#endif
4465
4466/*
4467 *
4468 */
4469
4470void
4471rb_threadptr_check_signal(rb_thread_t *mth)
4472{
4473 /* mth must be main_thread */
4474 if (rb_signal_buff_size() > 0) {
4475 /* wakeup main thread */
4476 threadptr_trap_interrupt(mth);
4477 }
4478}
4479
4480static void
4481async_bug_fd(const char *mesg, int errno_arg, int fd)
4482{
4483 char buff[64];
4484 size_t n = strlcpy(buff, mesg, sizeof(buff));
4485 if (n < sizeof(buff)-3) {
4486 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4487 }
4488 rb_async_bug_errno(buff, errno_arg);
4489}
4490
4491/* VM-dependent APIs are not available in this function */
4492static int
4493consume_communication_pipe(int fd)
4494{
4495#if USE_EVENTFD
4496 uint64_t buff[1];
4497#else
4498 /* the buffer can be shared because no one else refers to it. */
4499 static char buff[1024];
4500#endif
4501 ssize_t result;
4502 int ret = FALSE; /* for rb_sigwait_sleep */
4503
4504 /*
4505 * Disarm UBF_TIMER before we read, because it can become re-armed
4506 * at any time via the signal handler and the pipe will refill.
4507 * We can disarm it because this thread is now processing signals
4508 * and we do not want an unnecessary SIGVTALRM.
4509 */
4510 ubf_timer_disarm();
4511
4512 while (1) {
4513 result = read(fd, buff, sizeof(buff));
4514 if (result > 0) {
4515 ret = TRUE;
4516 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4517 return ret;
4518 }
4519 }
4520 else if (result == 0) {
4521 return ret;
4522 }
4523 else if (result < 0) {
4524 int e = errno;
4525 switch (e) {
4526 case EINTR:
4527 continue; /* retry */
4528 case EAGAIN:
4529#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4530 case EWOULDBLOCK:
4531#endif
4532 return ret;
4533 default:
4534 async_bug_fd("consume_communication_pipe: read", e, fd);
4535 }
4536 }
4537 }
4538}
4539
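/*
 * Drain the sigwait fd (if any), wake up threads blocked with UBFs, and
 * forward buffered signals to the main thread. Returns TRUE when
 * anything was consumed. Runs without the GVL, so it must not touch
 * VM-dependent APIs (th may even be 0).
 */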
4540static int
4541check_signals_nogvl(rb_thread_t *th, int sigwait_fd)
4542{
4543 rb_vm_t *vm = GET_VM(); /* th may be 0 */
4544 int ret = sigwait_fd >= 0 ? consume_communication_pipe(sigwait_fd) : FALSE;
4545 ubf_wakeup_all_threads();
4546 ruby_sigchld_handler(vm);
4547 if (rb_signal_buff_size()) {
4548 if (th == vm->ractor.main_thread) {
4549 /* no need to lock + wakeup if already in main thread */
4550 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
4551 }
4552 else {
4553 threadptr_trap_interrupt(vm->ractor.main_thread);
4554 }
4555 ret = TRUE; /* for SIGCHLD_LOSSY && rb_sigwait_sleep */
4556 }
4557 return ret;
4558}
4559
4560void
4561rb_thread_stop_timer_thread(void)
4562{
4563 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4564 native_reset_timer_thread();
4565 }
4566}
4567
4568void
4569rb_thread_reset_timer_thread(void)
4570{
4571 native_reset_timer_thread();
4572}
4573
4574void
4575rb_thread_start_timer_thread(void)
4576{
4577 system_working = 1;
4578 rb_thread_create_timer_thread();
4579}
4580
4581static int
4582clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4583{
4584 int i;
4585 VALUE coverage = (VALUE)val;
4586 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4587 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4588
4589 if (lines) {
4590 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4591 rb_ary_clear(lines);
4592 }
4593 else {
4594 int i;
4595 for (i = 0; i < RARRAY_LEN(lines); i++) {
4596 if (RARRAY_AREF(lines, i) != Qnil)
4597 RARRAY_ASET(lines, i, INT2FIX(0));
4598 }
4599 }
4600 }
4601 if (branches) {
4602 VALUE counters = RARRAY_AREF(branches, 1);
4603 for (i = 0; i < RARRAY_LEN(counters); i++) {
4604 RARRAY_ASET(counters, i, INT2FIX(0));
4605 }
4606 }
4607
4608 return ST_CONTINUE;
4609}
4610
4611void
4612rb_clear_coverages(void)
4613{
4614 VALUE coverages = rb_get_coverages();
4615 if (RTEST(coverages)) {
4616 rb_hash_foreach(coverages, clear_coverage_i, 0);
4617 }
4618}
4619
4620#if defined(HAVE_WORKING_FORK)
4621
4622static void
4623rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4624{
4625 rb_thread_t *i = 0;
4626 rb_vm_t *vm = th->vm;
4627 rb_ractor_t *r = th->ractor;
4628 vm->ractor.main_ractor = r;
4629 vm->ractor.main_thread = th;
4630 r->threads.main = th;
4631 r->status_ = ractor_created;
4632
4633 thread_sched_atfork(TH_SCHED(th));
4634 ubf_list_atfork();
4635
4636 // OK. After fork, only this thread accesses the thread lists:
4637 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4638 ccan_list_for_each(&r->threads.set, i, lt_node) {
4639 atfork(i, th);
4640 }
4641 }
4642 rb_vm_living_threads_init(vm);
4643
4644 rb_ractor_atfork(vm, th);
4645
4646 /* may be held by MJIT threads in parent */
4647 rb_native_mutex_initialize(&vm->waitpid_lock);
4648 rb_native_mutex_initialize(&vm->workqueue_lock);
4649
4650 /* may be held by any thread in parent */
4651 rb_native_mutex_initialize(&th->interrupt_lock);
4652
4653 vm->fork_gen++;
4654 rb_ractor_sleeper_threads_clear(th->ractor);
4655 rb_clear_coverages();
4656
4657 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4658 VM_ASSERT(vm->ractor.cnt == 1);
4659}
4660
4661static void
4662terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4663{
4664 if (th != current_th) {
4665 rb_mutex_abandon_keeping_mutexes(th);
4666 rb_mutex_abandon_locking_mutex(th);
4667 thread_cleanup_func(th, TRUE);
4668 }
4669}
4670
4671void rb_fiber_atfork(rb_thread_t *);
4672void
4673rb_thread_atfork(void)
4674{
4675 rb_thread_t *th = GET_THREAD();
4676 rb_threadptr_pending_interrupt_clear(th);
4677 rb_thread_atfork_internal(th, terminate_atfork_i);
4678 th->join_list = NULL;
4679 rb_fiber_atfork(th);
4680
4681 /* We don't want to reproduce CVE-2003-0900. */
4682 rb_reset_random_seed();
4683
4684 /* In the child, start the MJIT worker thread here; this is safer than starting it immediately after `after_fork_ruby`. */
4685 mjit_child_after_fork();
4686}
4687
4688static void
4689terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4690{
4691 if (th != current_th) {
4692 thread_cleanup_func_before_exec(th);
4693 }
4694}
4695
4696void
4697rb_thread_atfork_before_exec(void)
4698{
4699 rb_thread_t *th = GET_THREAD();
4700 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4701}
4702#else
4703void
4704rb_thread_atfork(void)
4705{
4706}
4707
4708void
4709rb_thread_atfork_before_exec(void)
4710{
4711}
4712#endif
4714struct thgroup {
4715 int enclosed;
4716 VALUE group;
4717};
4718
4719static size_t
4720thgroup_memsize(const void *ptr)
4721{
4722 return sizeof(struct thgroup);
4723}
4724
4725static const rb_data_type_t thgroup_data_type = {
4726 "thgroup",
4727 {0, RUBY_TYPED_DEFAULT_FREE, thgroup_memsize,},
4728 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4729};
4730
4731/*
4732 * Document-class: ThreadGroup
4733 *
4734 * ThreadGroup provides a means of keeping track of a number of threads as a
4735 * group.
4736 *
4737 * A given Thread object can only belong to one ThreadGroup at a time; adding
4738 * a thread to a new group will remove it from any previous group.
4739 *
4740 * Newly created threads belong to the same group as the thread from which they
4741 * were created.
4742 */
4743
4744/*
4745 * Document-const: Default
4746 *
4747 * The default ThreadGroup created when Ruby starts; all Threads belong to it
4748 * by default.
4749 */
4750static VALUE
4751thgroup_s_alloc(VALUE klass)
4752{
4753 VALUE group;
4754 struct thgroup *data;
4755
4756 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4757 data->enclosed = 0;
4758 data->group = group;
4759
4760 return group;
4761}
4762
4763/*
4764 * call-seq:
4765 * thgrp.list -> array
4766 *
4767 * Returns an array of all existing Thread objects that belong to this group.
4768 *
4769 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4770 */
4771
4772static VALUE
4773thgroup_list(VALUE group)
4774{
4775 VALUE ary = rb_ary_new();
4776 rb_thread_t *th = 0;
4777 rb_ractor_t *r = GET_RACTOR();
4778
4779 ccan_list_for_each(&r->threads.set, th, lt_node) {
4780 if (th->thgroup == group) {
4781 rb_ary_push(ary, th->self);
4782 }
4783 }
4784 return ary;
4785}
4786
4787
4788/*
4789 * call-seq:
4790 * thgrp.enclose -> thgrp
4791 *
4792 * Prevents threads from being added to or removed from the receiving
4793 * ThreadGroup.
4794 *
4795 * New threads can still be started in an enclosed ThreadGroup.
4796 *
4797 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4798 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4799 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4800 * tg.add thr
4801 * #=> ThreadError: can't move from the enclosed thread group
4802 */
4803
4804static VALUE
4805thgroup_enclose(VALUE group)
4806{
4807 struct thgroup *data;
4808
4809 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4810 data->enclosed = 1;
4811
4812 return group;
4813}
4814
4815
4816/*
4817 * call-seq:
4818 * thgrp.enclosed? -> true or false
4819 *
4820 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4821 */
4822
4823static VALUE
4824thgroup_enclosed_p(VALUE group)
4825{
4826 struct thgroup *data;
4827
4828 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4829 return RBOOL(data->enclosed);
4830}
4831
4832
4833/*
4834 * call-seq:
4835 * thgrp.add(thread) -> thgrp
4836 *
4837 * Adds the given +thread+ to this group, removing it from any other
4838 * group to which it may have previously been a member.
4839 *
4840 * puts "Initial group is #{ThreadGroup::Default.list}"
4841 * tg = ThreadGroup.new
4842 * t1 = Thread.new { sleep }
4843 * t2 = Thread.new { sleep }
4844 * puts "t1 is #{t1}"
4845 * puts "t2 is #{t2}"
4846 * tg.add(t1)
4847 * puts "Initial group now #{ThreadGroup::Default.list}"
4848 * puts "tg group now #{tg.list}"
4849 *
4850 * This will produce:
4851 *
4852 * Initial group is #<Thread:0x401bdf4c>
4853 * t1 is #<Thread:0x401b3c90>
4854 * t2 is #<Thread:0x401b3c18>
4855 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4856 * tg group now #<Thread:0x401b3c90>
4857 */
4858
4859static VALUE
4860thgroup_add(VALUE group, VALUE thread)
4861{
4862 rb_thread_t *target_th = rb_thread_ptr(thread);
4863 struct thgroup *data;
4864
4865 if (OBJ_FROZEN(group)) {
4866 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4867 }
4868 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4869 if (data->enclosed) {
4870 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4871 }
4872
4873 if (OBJ_FROZEN(target_th->thgroup)) {
4874 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4875 }
4876 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
4877 if (data->enclosed) {
4879 "can't move from the enclosed thread group");
4880 }
4881
4882 target_th->thgroup = group;
4883 return group;
4884}
4885
4886/*
4887 * Document-class: ThreadShield
4888 */
4889static void
4890thread_shield_mark(void *ptr)
4891{
4892 rb_gc_mark((VALUE)ptr);
4893}
4894
4895static const rb_data_type_t thread_shield_data_type = {
4896 "thread_shield",
4897 {thread_shield_mark, 0, 0,},
4898 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4899};
4900
4901static VALUE
4902thread_shield_alloc(VALUE klass)
4903{
4904 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4905}
4906
4907#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4908#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
4909#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4910#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
4911STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
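/*
 * The number of waiters is packed into the FL_USER0..FL_USER19 bits of
 * the shield object's own flags, so no extra storage is needed: a count
 * of w is stored as (w << THREAD_SHIELD_WAITING_SHIFT) under the mask.
 */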
4912static inline unsigned int
4913rb_thread_shield_waiting(VALUE b)
4914{
4915 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
4916}
4917
4918static inline void
4919rb_thread_shield_waiting_inc(VALUE b)
4920{
4921 unsigned int w = rb_thread_shield_waiting(b);
4922 w++;
4923 if (w > THREAD_SHIELD_WAITING_MAX)
4924 rb_raise(rb_eRuntimeError, "waiting count overflow");
4925 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4926 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4927}
4928
4929static inline void
4930rb_thread_shield_waiting_dec(VALUE b)
4931{
4932 unsigned int w = rb_thread_shield_waiting(b);
4933 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
4934 w--;
4935 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4936 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4937}
4938
4939VALUE
4940rb_thread_shield_new(void)
4941{
4942 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
4943 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
4944 return thread_shield;
4945}
4946
4947bool
4948rb_thread_shield_owned(VALUE self)
4949{
4950 VALUE mutex = GetThreadShieldPtr(self);
4951 if (!mutex) return false;
4952
4953 rb_mutex_t *m = mutex_ptr(mutex);
4954
4955 return m->fiber == GET_EC()->fiber_ptr;
4956}
4957
4958/*
4959 * Wait on a thread shield.
4960 *
4961 * Returns
4962 * true: acquired the thread shield
4963 * false: the thread shield was destroyed and no other threads are waiting
4964 * nil: the thread shield was destroyed but it is still in use
4965 */
4966VALUE
4967rb_thread_shield_wait(VALUE self)
4968{
4969 VALUE mutex = GetThreadShieldPtr(self);
4970 rb_mutex_t *m;
4971
4972 if (!mutex) return Qfalse;
4973 m = mutex_ptr(mutex);
4974 if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
4975 rb_thread_shield_waiting_inc(self);
4976 rb_mutex_lock(mutex);
4977 rb_thread_shield_waiting_dec(self);
4978 if (DATA_PTR(self)) return Qtrue;
4979 rb_mutex_unlock(mutex);
4980 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
4981}
4982
4983static VALUE
4984thread_shield_get_mutex(VALUE self)
4985{
4986 VALUE mutex = GetThreadShieldPtr(self);
4987 if (!mutex)
4988 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
4989 return mutex;
4990}
4991
4992/*
4993 * Release a thread shield, and return true if it has waiting threads.
4994 */
4995VALUE
4996rb_thread_shield_release(VALUE self)
4997{
4998 VALUE mutex = thread_shield_get_mutex(self);
4999 rb_mutex_unlock(mutex);
5000 return RBOOL(rb_thread_shield_waiting(self) > 0);
5001}
5002
5003/*
5004 * Release and destroy a thread shield, and return true if it has waiting threads.
5005 */
5006VALUE
5007rb_thread_shield_destroy(VALUE self)
5008{
5009 VALUE mutex = thread_shield_get_mutex(self);
5010 DATA_PTR(self) = 0;
5011 rb_mutex_unlock(mutex);
5012 return RBOOL(rb_thread_shield_waiting(self) > 0);
5013}
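/*
 * Typical lifecycle, as a minimal sketch (sharing the shield VALUE with
 * the waiting threads is assumed to be done by the caller):
 *
 *   VALUE shield = rb_thread_shield_new();  // creator now owns the lock
 *   // ... other threads call rb_thread_shield_wait(shield) and block ...
 *   rb_thread_shield_destroy(shield);       // unlock and invalidate;
 *                                           // waiters get Qnil/Qfalse
 */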
5014
5015static VALUE
5016threadptr_recursive_hash(rb_thread_t *th)
5017{
5018 return th->ec->local_storage_recursive_hash;
5019}
5020
5021static void
5022threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5023{
5024 th->ec->local_storage_recursive_hash = hash;
5025}
5026
5028
5029/*
5030 * Returns the current "recursive list" used to detect recursion.
5031 * This list is a hash table, unique for the current thread and for
5032 * the current __callee__.
5033 */
5034
5035static VALUE
5036recursive_list_access(VALUE sym)
5037{
5038 rb_thread_t *th = GET_THREAD();
5039 VALUE hash = threadptr_recursive_hash(th);
5040 VALUE list;
5041 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5042 hash = rb_ident_hash_new();
5043 threadptr_recursive_hash_set(th, hash);
5044 list = Qnil;
5045 }
5046 else {
5047 list = rb_hash_aref(hash, sym);
5048 }
5049 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5050 list = rb_ident_hash_new();
5051 rb_hash_aset(hash, sym, list);
5052 }
5053 return list;
5054}
5055
5056/*
5057 * Returns Qtrue if and only if obj (or the pair <obj, paired_obj>) is already
5058 * in the recursion list.
5059 * Assumes the recursion list is valid.
5060 */
5061
5062static VALUE
5063recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5064{
5065#if SIZEOF_LONG == SIZEOF_VOIDP
5066 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5067#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5068 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5069 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5070#endif
5071
5072 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5073 if (UNDEF_P(pair_list))
5074 return Qfalse;
5075 if (paired_obj_id) {
5076 if (!RB_TYPE_P(pair_list, T_HASH)) {
5077 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5078 return Qfalse;
5079 }
5080 else {
5081 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5082 return Qfalse;
5083 }
5084 }
5085 return Qtrue;
5086}
5087
5088/*
5089 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5090 * For a single obj, it sets list[obj] to Qtrue.
5091 * For a pair, it sets list[obj] to paired_obj_id if possible,
5092 * otherwise list[obj] becomes a hash like:
5093 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5094 * Assumes the recursion list is valid.
5095 */
5096
5097static void
5098recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5099{
5100 VALUE pair_list;
5101
5102 if (!paired_obj) {
5103 rb_hash_aset(list, obj, Qtrue);
5104 }
5105 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5106 rb_hash_aset(list, obj, paired_obj);
5107 }
5108 else {
5109 if (!RB_TYPE_P(pair_list, T_HASH)){
5110 VALUE other_paired_obj = pair_list;
5111 pair_list = rb_hash_new();
5112 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5113 rb_hash_aset(list, obj, pair_list);
5114 }
5115 rb_hash_aset(pair_list, paired_obj, Qtrue);
5116 }
5117}
5118
5119/*
5120 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5121 * For a pair, if list[obj] is a hash, then paired_obj_id is
5122 * removed from the hash and no attempt is made to simplify
5123 * list[obj] from {only_one_paired_id => true} to only_one_paired_id.
5124 * Assumes the recursion list is valid.
5125 */
5126
5127static int
5128recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5129{
5130 if (paired_obj) {
5131 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5132 if (UNDEF_P(pair_list)) {
5133 return 0;
5134 }
5135 if (RB_TYPE_P(pair_list, T_HASH)) {
5136 rb_hash_delete_entry(pair_list, paired_obj);
5137 if (!RHASH_EMPTY_P(pair_list)) {
5138 return 1; /* keep the hash until it is empty */
5139 }
5140 }
5141 }
5142 rb_hash_delete_entry(list, obj);
5143 return 1;
5144}
5146struct exec_recursive_params {
5147 VALUE (*func) (VALUE, VALUE, int);
5148 VALUE list;
5149 VALUE obj;
5150 VALUE pairid;
5151 VALUE arg;
5152};
5153
5154static VALUE
5155exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5156{
5157 struct exec_recursive_params *p = (void *)data;
5158 return (*p->func)(p->obj, p->arg, FALSE);
5159}
5160
5161/*
5162 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5163 * current method is called recursively on obj, or on the pair <obj, pairid>.
5164 * If outer is 0, then the innermost func will be called with recursive set
5165 * to Qtrue; otherwise the outermost func will be called. In the latter case,
5166 * all inner calls to func are short-circuited with throw.
5167 * Implementation detail: the value thrown is the recursive list, which is
5168 * specific to the current method and unlikely to be caught anywhere else.
5169 * list[recursive_key] is used as a flag for the outermost call.
5170 */
5171
5172static VALUE
5173exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5174{
5175 VALUE result = Qundef;
5176 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5177 struct exec_recursive_params p;
5178 int outermost;
5179 p.list = recursive_list_access(sym);
5180 p.obj = obj;
5181 p.pairid = pairid;
5182 p.arg = arg;
5183 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5184
5185 if (recursive_check(p.list, p.obj, pairid)) {
5186 if (outer && !outermost) {
5187 rb_throw_obj(p.list, p.list);
5188 }
5189 return (*func)(obj, arg, TRUE);
5190 }
5191 else {
5192 enum ruby_tag_type state;
5193
5194 p.func = func;
5195
5196 if (outermost) {
5197 recursive_push(p.list, ID2SYM(recursive_key), 0);
5198 recursive_push(p.list, p.obj, p.pairid);
5199 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5200 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5201 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5202 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5203 if (result == p.list) {
5204 result = (*func)(obj, arg, TRUE);
5205 }
5206 }
5207 else {
5208 volatile VALUE ret = Qundef;
5209 recursive_push(p.list, p.obj, p.pairid);
5210 EC_PUSH_TAG(GET_EC());
5211 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5212 ret = (*func)(obj, arg, FALSE);
5213 }
5214 EC_POP_TAG();
5215 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5216 goto invalid;
5217 }
5218 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5219 result = ret;
5220 }
5221 }
5222 *(volatile struct exec_recursive_params *)&p;
5223 return result;
5224
5225 invalid:
5226 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5227 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5228 sym, rb_thread_current());
5230}
5231
5232/*
5233 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5234 * current method is called recursively on obj
5235 */
5236
5237VALUE
5238rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5239{
5240 return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5241}
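/*
 * Example: a minimal sketch of a recursion-safe callback (my_join_i is a
 * hypothetical helper, not part of this file):
 *
 *   static VALUE
 *   my_join_i(VALUE ary, VALUE sep, int recursive)
 *   {
 *       if (recursive) return rb_str_new_cstr("[...]");
 *       return rb_ary_join(ary, sep);
 *   }
 *
 *   // handles self-referencing arrays such as: a = []; a << a
 *   VALUE str = rb_exec_recursive(my_join_i, ary, rb_str_new_cstr(", "));
 */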
5242
5243/*
5244 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5245 * current method is called recursively on the ordered pair <obj, paired_obj>
5246 */
5247
5248VALUE
5249rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5250{
5251 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5252}
5253
5254/*
5255 * If recursion is detected on the current method and obj, the outermost
5256 * func will be called with (obj, arg, Qtrue). All inner func will be
5257 * short-circuited using throw.
5258 */
5259
5260VALUE
5261rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5262{
5263 return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5264}
5265
5266VALUE
5267rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5268{
5269 return exec_recursive(func, obj, 0, arg, 1, mid);
5270}
5271
5272/*
5273 * If recursion is detected on the current method, obj and paired_obj,
5274 * the outermost func will be called with (obj, arg, Qtrue). All inner
5275 * func will be short-circuited using throw.
5276 */
5277
5278VALUE
5279rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5280{
5281 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
5282}
5283
5284/*
5285 * call-seq:
5286 * thread.backtrace -> array or nil
5287 *
5288 * Returns the current backtrace of the target thread.
5289 *
5290 */
5291
5292static VALUE
5293rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5294{
5295 return rb_vm_thread_backtrace(argc, argv, thval);
5296}
5297
5298/* call-seq:
5299 * thread.backtrace_locations(*args) -> array or nil
5300 *
5301 * Returns the execution stack for the target thread---an array containing
5302 * backtrace location objects.
5303 *
5304 * See Thread::Backtrace::Location for more information.
5305 *
5306 * This method behaves similarly to Kernel#caller_locations except it applies
5307 * to a specific thread.
5308 */
5309static VALUE
5310rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5311{
5312 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5313}
5314
5315void
5316Init_Thread_Mutex(void)
5317{
5318 rb_thread_t *th = GET_THREAD();
5319
5320 rb_native_mutex_initialize(&th->vm->waitpid_lock);
5321 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5322 rb_native_mutex_initialize(&th->interrupt_lock);
5323}
5324
5325/*
5326 * Document-class: ThreadError
5327 *
5328 * Raised when an invalid operation is attempted on a thread.
5329 *
5330 * For example, when no other thread has been started:
5331 *
5332 * Thread.stop
5333 *
5334 * This will raise the following exception:
5335 *
5336 * ThreadError: stopping only thread
5337 * note: use sleep to stop forever
5338 */
5339
5340void
5341Init_Thread(void)
5342{
5343 VALUE cThGroup;
5344 rb_thread_t *th = GET_THREAD();
5345
5346 sym_never = ID2SYM(rb_intern_const("never"));
5347 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5348 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5349
5350 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5351 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5352 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5353 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5354 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5355 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5356 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5357 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5358 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5359 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5360 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5361 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5362 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5363 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5364 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5365 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5366 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5367 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5368 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5369
5370 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5371 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5372 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5373 rb_define_method(rb_cThread, "value", thread_value, 0);
5374 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5375 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5376 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5377 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5378 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5379 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5380 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5381 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5382 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5383 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5384 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5385 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5386 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5387 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5388 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5389 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5390 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5391 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5392 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5393 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5394 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5395 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5396 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5397 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5398 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5399 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5400
5401 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5402 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5403 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5404 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5405 rb_define_alias(rb_cThread, "inspect", "to_s");
5406
5407 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5408 "stream closed in another thread");
5409
5410 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5411 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5412 rb_define_method(cThGroup, "list", thgroup_list, 0);
5413 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5414 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5415 rb_define_method(cThGroup, "add", thgroup_add, 1);
5416
5417 {
5418 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5419 rb_define_const(cThGroup, "Default", th->thgroup);
5420 }
5421
5423
5424 /* init thread core */
5425 {
5426 /* main thread setting */
5427 {
5428 /* acquire global vm lock */
5429 struct rb_thread_sched *sched = TH_SCHED(th);
5430 thread_sched_to_running(sched, th);
5431
5432 th->pending_interrupt_queue = rb_ary_hidden_new(0);
5433 th->pending_interrupt_queue_checked = 0;
5434 th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5435 }
5436 }
5437
5438 rb_thread_create_timer_thread();
5439
5440 Init_thread_sync();
5441}
5442
5443int
5444ruby_native_thread_p(void)
5445{
5446 rb_thread_t *th = ruby_thread_from_native();
5447
5448 return th != 0;
5449}
5450
5451#ifdef NON_SCALAR_THREAD_ID
5452 #define thread_id_str(th) (NULL)
5453#else
5454 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5455#endif
5456
5457static void
5458debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5459{
5460 rb_thread_t *th = 0;
5461 VALUE sep = rb_str_new_cstr("\n ");
5462
5463 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5464 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5465 (void *)GET_THREAD(), (void *)r->threads.main);
5466
5467 ccan_list_for_each(&r->threads.set, th, lt_node) {
5468 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5469 "native:%p int:%u",
5470 th->self, (void *)th, thread_id_str(th), th->ec->interrupt_flag);
5471
5472 if (th->locking_mutex) {
5473 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5474 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5475 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5476 }
5477
5478 {
5479 struct rb_waiting_list *list = th->join_list;
5480 while (list) {
5481 rb_str_catf(msg, "\n depended by: rb_thread_t:%p", (void *)list->thread);
5482 list = list->next;
5483 }
5484 }
5485 rb_str_catf(msg, "\n ");
5486 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, 0, 0), sep));
5487 rb_str_catf(msg, "\n");
5488 }
5489}
5490
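/*
 * A ractor counts as deadlocked when every living thread is sleeping
 * forever with no pending interrupt and no acquirable mutex; in that
 * case raise a fatal error on the main thread, with a diagnostic dump
 * from debug_deadlock_check() attached to the message.
 */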
5491static void
5492rb_check_deadlock(rb_ractor_t *r)
5493{
5494 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5495
5496 int found = 0;
5497 rb_thread_t *th = NULL;
5498 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5499 int ltnum = rb_ractor_living_thread_num(r);
5500
5501 if (ltnum > sleeper_num) return;
5502 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5503 if (patrol_thread && patrol_thread != GET_THREAD()) return;
5504
5505 ccan_list_for_each(&r->threads.set, th, lt_node) {
5506 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5507 found = 1;
5508 }
5509 else if (th->locking_mutex) {
5510 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5511 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5512 found = 1;
5513 }
5514 }
5515 if (found)
5516 break;
5517 }
5518
5519 if (!found) {
5520 VALUE argv[2];
5521 argv[0] = rb_eFatal;
5522 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5523 debug_deadlock_check(r, argv[1]);
5524 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5525 rb_threadptr_raise(r->threads.main, 2, argv);
5526 }
5527}
5528
5529// Used for VM memsize reporting. Returns the size of a list of waiting_fd
5530// structs. Defined here because the struct definition lives here as well.
5531size_t
5532rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds)
5533{
5534 struct waiting_fd *waitfd = 0;
5535 size_t size = 0;
5536
5537 ccan_list_for_each(waiting_fds, waitfd, wfd_node) {
5538 size += sizeof(struct waiting_fd);
5539 }
5540
5541 return size;
5542}
5543
5544static void
5545update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5546{
5547 const rb_control_frame_t *cfp = GET_EC()->cfp;
5548 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5549 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5550 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5551 if (lines) {
5552 long line = rb_sourceline() - 1;
5553 long count;
5554 VALUE num;
5555 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5556 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5557 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5558 rb_ary_push(lines, LONG2FIX(line + 1));
5559 return;
5560 }
5561 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5562 return;
5563 }
5564 num = RARRAY_AREF(lines, line);
5565 if (!FIXNUM_P(num)) return;
5566 count = FIX2LONG(num) + 1;
5567 if (POSFIXABLE(count)) {
5568 RARRAY_ASET(lines, line, LONG2FIX(count));
5569 }
5570 }
5571 }
5572}
5573
5574static void
5575update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5576{
5577 const rb_control_frame_t *cfp = GET_EC()->cfp;
5578 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5579 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5580 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5581 if (branches) {
5582 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5583 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5584 VALUE counters = RARRAY_AREF(branches, 1);
5585 VALUE num = RARRAY_AREF(counters, idx);
5586 count = FIX2LONG(num) + 1;
5587 if (POSFIXABLE(count)) {
5588 RARRAY_ASET(counters, idx, LONG2FIX(count));
5589 }
5590 }
5591 }
5592}
5593
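/*
 * Resolve the source location of a method entry, following alias and
 * refinement indirections. On success, fills resolved_location with
 * [path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column]
 * and returns the resolved method entry; returns NULL otherwise.
 */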
5594const rb_method_entry_t *
5595rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5596{
5597 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5598
5599 if (!me->def) return NULL; // negative cme
5600
5601 retry:
5602 switch (me->def->type) {
5603 case VM_METHOD_TYPE_ISEQ: {
5604 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5605 rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5606 path = rb_iseq_path(iseq);
5607 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5608 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5609 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5610 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5611 break;
5612 }
5613 case VM_METHOD_TYPE_BMETHOD: {
5614 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5615 if (iseq) {
5616 rb_iseq_location_t *loc;
5617 rb_iseq_check(iseq);
5618 path = rb_iseq_path(iseq);
5619 loc = &ISEQ_BODY(iseq)->location;
5620 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5621 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5622 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5623 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5624 break;
5625 }
5626 return NULL;
5627 }
5628 case VM_METHOD_TYPE_ALIAS:
5629 me = me->def->body.alias.original_me;
5630 goto retry;
5631 case VM_METHOD_TYPE_REFINED:
5632 me = me->def->body.refined.orig_me;
5633 if (!me) return NULL;
5634 goto retry;
5635 default:
5636 return NULL;
5637 }
5638
5639 /* found */
5640 if (RB_TYPE_P(path, T_ARRAY)) {
5641 path = rb_ary_entry(path, 1);
5642 if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
5643 }
5644 if (resolved_location) {
5645 resolved_location[0] = path;
5646 resolved_location[1] = beg_pos_lineno;
5647 resolved_location[2] = beg_pos_column;
5648 resolved_location[3] = end_pos_lineno;
5649 resolved_location[4] = end_pos_column;
5650 }
5651 return me;
5652}
5653
5654static void
5655update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5656{
5657 const rb_control_frame_t *cfp = GET_EC()->cfp;
5658 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5659 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5660 VALUE rcount;
5661 long count;
5662
5663 me = rb_resolve_me_location(me, 0);
5664 if (!me) return;
5665
5666 rcount = rb_hash_aref(me2counter, (VALUE) me);
5667 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5668 if (POSFIXABLE(count)) {
5669 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5670 }
5671}
5672
5673VALUE
5674rb_get_coverages(void)
5675{
5676 return GET_VM()->coverages;
5677}
5678
5679int
5680rb_get_coverage_mode(void)
5681{
5682 return GET_VM()->coverage_mode;
5683}
5684
5685void
5686rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5687{
5688 GET_VM()->coverages = coverages;
5689 GET_VM()->me2counter = me2counter;
5690 GET_VM()->coverage_mode = mode;
5691}
5692
5693void
5694rb_resume_coverages(void)
5695{
5696 int mode = GET_VM()->coverage_mode;
5697 VALUE me2counter = GET_VM()->me2counter;
5698 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5699 if (mode & COVERAGE_TARGET_BRANCHES) {
5700 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5701 }
5702 if (mode & COVERAGE_TARGET_METHODS) {
5703 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5704 }
5705}
5706
5707void
5708rb_suspend_coverages(void)
5709{
5710 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
5711 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
5712 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
5713 }
5714 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
5715 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
5716 }
5717}
5718
5719/* Make coverage arrays empty so old covered files are no longer tracked. */
5720void
5721rb_reset_coverages(void)
5722{
5723 rb_clear_coverages();
5724 rb_iseq_remove_coverage_all();
5725 GET_VM()->coverages = Qfalse;
5726}
5727
5728VALUE
5729rb_default_coverage(int n)
5730{
5731 VALUE coverage = rb_ary_hidden_new_fill(3);
5732 VALUE lines = Qfalse, branches = Qfalse;
5733 int mode = GET_VM()->coverage_mode;
5734
5735 if (mode & COVERAGE_TARGET_LINES) {
5736 lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
5737 }
5738 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5739
5740 if (mode & COVERAGE_TARGET_BRANCHES) {
5741 branches = rb_ary_hidden_new_fill(2);
5742 /* internal data structures for branch coverage:
5743 *
5744 * { branch base node =>
5745 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
5746 * branch target id =>
5747 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
5748 * ...
5749 * }],
5750 * ...
5751 * }
5752 *
5753 * Example:
5754 * { NODE_CASE =>
5755 * [1, 0, 4, 3, {
5756 * NODE_WHEN => [2, 8, 2, 9, 0],
5757 * NODE_WHEN => [3, 8, 3, 9, 1],
5758 * ...
5759 * }],
5760 * ...
5761 * }
5762 */
5763 VALUE structure = rb_hash_new();
5764 rb_obj_hide(structure);
5765 RARRAY_ASET(branches, 0, structure);
5766 /* branch execution counters */
5767 RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
5768 }
5769 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5770
5771 return coverage;
5772}
5773
5774static VALUE
5775uninterruptible_exit(VALUE v)
5776{
5777 rb_thread_t *cur_th = GET_THREAD();
5778 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
5779
5780 cur_th->pending_interrupt_queue_checked = 0;
5781 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
5782 RUBY_VM_SET_INTERRUPT(cur_th->ec);
5783 }
5784 return Qnil;
5785}
5786
5787VALUE
5788rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
5789{
5790 VALUE interrupt_mask = rb_ident_hash_new();
5791 rb_thread_t *cur_th = GET_THREAD();
5792
5793 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5794 OBJ_FREEZE_RAW(interrupt_mask);
5795 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5796
5797 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
5798
5799 RUBY_VM_CHECK_INTS(cur_th->ec);
5800 return ret;
5801}
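/*
 * Example: a minimal sketch of running a callback with asynchronous
 * interrupts deferred (critical_body is a hypothetical function):
 *
 *   static VALUE
 *   critical_body(VALUE data)
 *   {
 *       // Thread#raise, Thread#kill etc. are deferred while this runs.
 *       return Qnil;
 *   }
 *
 *   rb_uninterruptible(critical_body, Qnil);
 *   // pending interrupts are checked again right after the call
 */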
#define RUBY_ASSERT_ALWAYS(expr)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
Definition assert.h:167
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
#define RUBY_INTERNAL_EVENT_SWITCH
Thread switched.
Definition event.h:85
int rb_remove_event_hook(rb_event_hook_func_t func)
Removes the passed function from the list of event hooks.
Definition vm_trace.c:293
#define RUBY_EVENT_THREAD_BEGIN
Encountered a new thread.
Definition event.h:53
void(* rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
Type of event hooks.
Definition event.h:115
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:103
#define RUBY_EVENT_CALL
A method, written in Ruby, is called.
Definition event.h:37
#define RUBY_EVENT_THREAD_END
Encountered an end of a thread.
Definition event.h:54
static void RB_FL_SET_RAW(VALUE obj, VALUE flags)
This is an implenentation detail of RB_FL_SET().
Definition fl_type.h:638
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition class.c:923
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
Definition class.c:2289
ID rb_frame_last_func(void)
Returns the ID of the last method in the call stack.
Definition eval.c:1098
int rb_keyword_given_p(void)
Determines if the current method is given a keyword argument.
Definition eval.c:881
int rb_block_given_p(void)
Determines if the current method is given a block.
Definition eval.c:868
#define rb_str_new2
Old name of rb_str_new_cstr.
Definition string.h:1675
#define ALLOC
Old name of RB_ALLOC.
Definition memory.h:394
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define xfree
Old name of ruby_xfree.
Definition xmalloc.h:58
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define OBJ_FROZEN
Old name of RB_OBJ_FROZEN.
Definition fl_type.h:145
#define xrealloc
Old name of ruby_xrealloc.
Definition xmalloc.h:56
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define OBJ_FREEZE_RAW
Old name of RB_OBJ_FREEZE_RAW.
Definition fl_type.h:144
#define UNREACHABLE_RETURN
Old name of RBIMPL_UNREACHABLE_RETURN.
Definition assume.h:29
#define CLASS_OF
Old name of rb_class_of.
Definition globals.h:203
#define xmalloc
Old name of ruby_xmalloc.
Definition xmalloc.h:53
#define LONG2FIX
Old name of RB_INT2FIX.
Definition long.h:49
#define FIX2INT
Old name of RB_FIX2INT.
Definition int.h:41
#define T_ICLASS
Old name of RUBY_T_ICLASS.
Definition value_type.h:66
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define Qtrue
Old name of RUBY_Qtrue.
#define NUM2INT
Old name of RB_NUM2INT.
Definition int.h:44
#define INT2NUM
Old name of RB_INT2NUM.
Definition int.h:43
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
Definition long.h:46
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define POSFIXABLE
Old name of RB_POSFIXABLE.
Definition fixnum.h:29
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define FIXNUM_P
Old name of RB_FIXNUM_P.
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
Definition eval.c:298
#define ruby_debug
This variable controls whether the interpreter is in debug mode.
Definition error.h:470
void rb_raise(VALUE exc, const char *fmt,...)
Exception entry point.
Definition error.c:3150
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition eval.c:688
void rb_bug(const char *fmt,...)
Interpreter panic switch.
Definition error.c:794
VALUE rb_eSystemExit
SystemExit exception.
Definition error.c:1084
VALUE rb_eIOError
IOError exception.
Definition io.c:182
VALUE rb_eStandardError
StandardError exception.
Definition error.c:1088
VALUE rb_eTypeError
TypeError exception.
Definition error.c:1091
void rb_frozen_error_raise(VALUE frozen_obj, const char *fmt,...)
Raises an instance of rb_eFrozenError.
Definition error.c:3472
VALUE rb_eFatal
fatal exception.
Definition error.c:1087
VALUE rb_eRuntimeError
RuntimeError exception.
Definition error.c:1089
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports always regardless of runtime -W flag.
Definition error.c:411
VALUE rb_exc_new(VALUE etype, const char *ptr, long len)
Creates an instance of the passed exception class.
Definition error.c:1129
VALUE rb_eArgError
ArgumentError exception.
Definition error.c:1092
VALUE rb_eThreadError
ThreadError exception.
Definition eval.c:886
void rb_exit(int status)
Terminates the current execution context.
Definition process.c:4519
VALUE rb_eSignal
SignalException exception.
Definition error.c:1086
VALUE rb_obj_alloc(VALUE klass)
Allocates an instance of the given class.
Definition object.c:1940
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
Definition object.c:84
VALUE rb_obj_class(VALUE obj)
Queries the class of an object.
Definition object.c:191
VALUE rb_cThread
Thread class.
Definition vm.c:466
VALUE rb_cModule
Module class.
Definition object.c:53
double rb_num2dbl(VALUE num)
Converts an instance of rb_cNumeric into C's double.
Definition object.c:3624
VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass)
Queries if the given object is an instance (of possibly descendants) of the given class.
Definition object.c:788
static const char * rb_enc_name(rb_encoding *enc)
Queries the (canonical) name of the passed encoding.
Definition encoding.h:433
static bool rb_enc_asciicompat(rb_encoding *enc)
Queries if the passed encoding is in some sense compatible with ASCII.
Definition encoding.h:784
static int rb_check_arity(int argc, int min, int max)
Ensures that the passed integer is in the passed range.
Definition error.h:280
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
Definition proc.c:848
void rb_reset_random_seed(void)
Resets the RNG behind rb_genrand_int32()/rb_genrand_real().
Definition random.c:1776
VALUE rb_str_new_frozen(VALUE str)
Creates a frozen copy of the string, if necessary.
Definition string.c:1382
VALUE rb_str_concat(VALUE dst, VALUE src)
Identical to rb_str_append(), except it also accepts an integer as a codepoint.
Definition string.c:3453
#define rb_str_cat_cstr(buf, str)
Identical to rb_str_cat(), except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1656
#define rb_str_new_cstr(str)
Identical to rb_str_new, except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1514
int rb_thread_interrupted(VALUE thval)
Checks if the thread's execution was recently interrupted.
Definition thread.c:1457
VALUE rb_thread_local_aref(VALUE thread, ID key)
This badly named function reads from Fiber-local storage.
Definition thread.c:3397
VALUE rb_thread_kill(VALUE thread)
Terminates the given thread.
Definition thread.c:2590
#define RUBY_UBF_IO
A special UBF for blocking IO operations.
Definition thread.h:382
VALUE rb_thread_main(void)
Obtains the "main" thread.
Definition thread.c:2829
VALUE rb_exec_recursive(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
"Recursion" API entry point.
Definition thread.c:5237
void rb_thread_sleep_forever(void)
Blocks indefinitely.
Definition thread.c:1381
void rb_thread_fd_close(int fd)
Notifies a closing of a file descriptor to other threads.
Definition thread.c:2530
void rb_thread_wait_for(struct timeval time)
Identical to rb_thread_sleep(), except it takes struct timeval instead.
Definition thread.c:1425
VALUE rb_thread_stop(void)
Stops the current thread.
Definition thread.c:2741
VALUE rb_exec_recursive_paired(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive(), except it checks for the recursion on the ordered pair of { g, p }.
Definition thread.c:5248
typedef void rb_unblock_function_t(void *)
This is the type of UBFs.
Definition thread.h:336
void rb_thread_atfork_before_exec(void)
FIXME: the situation of this function is unclear.
Definition thread.c:4708
void rb_thread_check_ints(void)
Checks for interrupts.
Definition thread.c:1440
VALUE rb_thread_run(VALUE thread)
This is a rb_thread_wakeup() + rb_thread_schedule() combo.
Definition thread.c:2732
VALUE rb_thread_wakeup(VALUE thread)
Marks a given thread as eligible for scheduling.
Definition thread.c:2685
VALUE rb_mutex_unlock(VALUE mutex)
Releases the mutex.
VALUE rb_exec_recursive_paired_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive_outer(), except it checks for the recursion on the ordered pair of { g, p }.
Definition thread.c:5278
void rb_thread_sleep_deadly(void)
Identical to rb_thread_sleep_forever(), except the thread calling this function is considered "dead" when our deadlock checker is triggered.
Definition thread.c:1388
void rb_thread_atfork(void)
A pthread_atfork(3posix)-like API.
Definition thread.c:4703
VALUE rb_thread_current(void)
Obtains the "current" thread.
Definition thread.c:2808
int rb_thread_alone(void)
Checks if the calling thread is the only thread that is currently alive.
Definition thread.c:3669
VALUE rb_thread_local_aset(VALUE thread, ID key, VALUE val)
This badly named function writes to Fiber-local storage.
Definition thread.c:3545
void rb_thread_schedule(void)
Tries to switch to another thread.
Definition thread.c:1488
#define RUBY_UBF_PROCESS
A special UBF for blocking process operations.
Definition thread.h:389
VALUE rb_exec_recursive_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
Identical to rb_exec_recursive(), except it calls f for outermost recursion only.
Definition thread.c:5260
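The rb_exec_recursive() family guards against infinite recursion through self-referencing containers: the callback's third argument becomes non-zero when the same object (or pair) is encountered again on the stack. A sketch of the idiomatic use, with hypothetical names:

    static VALUE
    safe_inspect_i(VALUE obj, VALUE arg, int recur)
    {
        if (recur) return rb_str_new_cstr("[...]");  /* cycle detected */
        return rb_inspect(obj);
    }

    static VALUE
    safe_inspect(VALUE obj)
    {
        return rb_exec_recursive(safe_inspect_i, obj, 0);
    }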
VALUE rb_thread_wakeup_alive(VALUE thread)
Identical to rb_thread_wakeup(), except it doesn't raise on an already killed thread.
Definition thread.c:2694
VALUE rb_mutex_lock(VALUE mutex)
Attempts to lock the mutex.
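rb_mutex_lock() can raise (for instance on deadlock or interrupt), and anything executed while the lock is held may raise too, so C code usually pairs it with rb_mutex_unlock() through rb_ensure(). A sketch under that assumption; locked_body and with_mutex are hypothetical:

    static VALUE
    locked_body(VALUE callable)
    {
        return rb_funcall(callable, rb_intern("call"), 0);
    }

    static VALUE
    with_mutex(VALUE mutex, VALUE callable)
    {
        rb_mutex_lock(mutex);
        /* rb_mutex_unlock runs even if locked_body raises */
        return rb_ensure(locked_body, callable, rb_mutex_unlock, mutex);
    }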
void rb_thread_sleep(int sec)
Blocks for the given period of time.
Definition thread.c:1463
void rb_timespec_now(struct timespec *ts)
Fills the current time into the given struct.
Definition time.c:1931
struct timeval rb_time_timeval(VALUE time)
Converts an instance of rb_cTime to a struct timeval that represents the identical point of time.
Definition time.c:2853
VALUE rb_ivar_set(VALUE obj, ID name, VALUE val)
Identical to rb_iv_set(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1606
VALUE rb_ivar_get(VALUE obj, ID name)
Identical to rb_iv_get(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1218
VALUE rb_class_path(VALUE mod)
Identical to rb_mod_name(), except it returns #<Class: ...> style inspection for anonymous modules.
Definition variable.c:188
void rb_define_alloc_func(VALUE klass, rb_alloc_func_t func)
Sets the allocator function of a class.
int rb_sourceline(void)
Resembles __LINE__.
Definition vm.c:1702
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
Definition symbol.h:276
ID rb_check_id(volatile VALUE *namep)
Detects if the given name is already interned or not.
Definition symbol.c:1085
VALUE rb_to_symbol(VALUE name)
Identical to rb_intern_str(), except it generates a dynamic symbol if necessary.
Definition string.c:11924
ID rb_to_id(VALUE str)
Identical to rb_intern(), except it takes an instance of rb_cString.
Definition string.c:11914
void rb_define_const(VALUE klass, const char *name, VALUE val)
Defines a Ruby level constant under a namespace.
Definition variable.c:3440
VALUE rb_eIOTimeoutError
Indicates that a timeout has occurred while performing an IO operation.
Definition io.c:183
#define RB_NOGVL_UBF_ASYNC_SAFE
Passing this flag to rb_nogvl() indicates that the passed UBF is async-signal-safe.
Definition thread.h:60
#define RB_NOGVL_INTR_FAIL
Passing this flag to rb_nogvl() prevents it from checking interrupts.
Definition thread.h:48
void * rb_nogvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int flags)
Identical to rb_thread_call_without_gvl(), except it additionally takes "flags" that change the behaviour.
Definition thread.c:1541
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
(Re-)acquires the GVL.
Definition thread.c:1774
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Identical to rb_thread_call_without_gvl(), except it does not interface with signals etc.
Definition thread.c:1672
void * rb_thread_call_without_gvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Allows the passed function to run in parallel with other Ruby threads.
Definition thread.c:1679
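These entry points are how C extensions run blocking operations without stalling every other Ruby thread: hand the blocking function to rb_thread_call_without_gvl() together with an unblock function (UBF) that can interrupt it. A sketch wrapping read(2), assuming <unistd.h> is included; all names are hypothetical:

    struct read_args { int fd; void *buf; size_t len; ssize_t result; };

    static void *
    blocking_read(void *p)
    {
        struct read_args *a = p;
        a->result = read(a->fd, a->buf, a->len);   /* may block; GVL is released */
        return NULL;
    }

    static ssize_t
    read_without_gvl(int fd, void *buf, size_t len)
    {
        struct read_args a = { fd, buf, len, -1 };
        rb_thread_call_without_gvl(blocking_read, &a, RUBY_UBF_IO, NULL);
        return a.result;
    }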
VALUE rb_sprintf(const char *fmt,...)
Ruby's extended sprintf(3).
Definition sprintf.c:1219
VALUE rb_str_catf(VALUE dst, const char *fmt,...)
Identical to rb_sprintf(), except it renders the output to the specified object rather than creating a new one.
Definition sprintf.c:1242
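Both printf-style helpers also accept the "%"PRIsVALUE extension for embedding Ruby objects directly in the format. A small sketch (build_report is a hypothetical helper):

    static VALUE
    build_report(int count)
    {
        VALUE msg = rb_sprintf("%d threads", count);          /* builds a new String */
        rb_str_catf(msg, " (main: %"PRIsVALUE")", rb_thread_main());  /* appends in place */
        return msg;
    }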
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Shim for block function parameters.
Definition iterator.h:58
VALUE rb_yield(VALUE val)
Yields the block.
Definition vm_eval.c:1357
void rb_throw_obj(VALUE tag, VALUE val)
Identical to rb_throw(), except it allows arbitrary Ruby object to become a tag.
Definition vm_eval.c:2277
static int rb_fd_max(const rb_fdset_t *f)
It seems this function has no use.
Definition largesize.h:209
void rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
Destructively overwrites an fdset with another.
void rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
Identical to rb_fd_copy(), except it copies unlimited number of file descriptors.
void rb_fd_term(rb_fdset_t *f)
Destroys the rb_fdset_t, releasing any memory and resources it used.
static fd_set * rb_fd_ptr(const rb_fdset_t *f)
Raw pointer to fd_set.
Definition largesize.h:195
#define MEMCPY(p1, p2, type, n)
Handy macro to call memcpy.
Definition memory.h:366
#define ALLOCA_N(type, n)
Allocates room for n elements of the given type on the machine stack (an alloca() wrapper).
Definition memory.h:286
#define MEMZERO(p, type, n)
Handy macro to erase a region of memory.
Definition memory.h:354
VALUE rb_thread_create(VALUE (*f)(void *g), void *g)
Creates a rb_cThread instance.
void rb_hash_foreach(VALUE hash, int (*func)(VALUE key, VALUE val, VALUE arg), VALUE arg)
Iteration over the given hash.
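rb_hash_foreach() drives iteration through a callback that returns one of the ST_* codes: ST_CONTINUE to proceed, ST_STOP to bail out, ST_DELETE to remove the current entry. A sketch that sums Integer values, with hypothetical names (and assuming the values really are Integers):

    static int
    sum_value_i(VALUE key, VALUE val, VALUE arg)
    {
        long *total = (long *)arg;
        *total += NUM2LONG(val);   /* raises unless val is an Integer */
        return ST_CONTINUE;
    }

    static long
    sum_values(VALUE hash)
    {
        long total = 0;
        rb_hash_foreach(hash, sum_value_i, (VALUE)&total);
        return total;
    }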
VALUE rb_ensure(VALUE (*b_proc)(VALUE), VALUE data1, VALUE (*e_proc)(VALUE), VALUE data2)
An equivalent of ensure clause.
#define rb_fd_isset
Queries if the given fd is in the rb_fdset_t.
Definition posix.h:60
#define rb_fd_select
Waits for multiple file descriptors at once.
Definition posix.h:66
#define rb_fd_init
Initialises the given rb_fdset_t.
Definition posix.h:63
#define rb_fd_set
Sets the given fd to the rb_fdset_t.
Definition posix.h:54
#define rb_fd_zero
Clears the given rb_fdset_t.
Definition posix.h:51
#define rb_fd_clr
Unsets the given fd from the rb_fdset_t.
Definition posix.h:57
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:68
#define RARRAY_CONST_PTR_TRANSIENT
Just another name of rb_array_const_ptr_transient.
Definition rarray.h:70
static int RARRAY_LENINT(VALUE ary)
Identical to rb_array_len(), except it returns int rather than long.
Definition rarray.h:343
static void RARRAY_ASET(VALUE ary, long i, VALUE v)
Assigns an object in an array.
Definition rarray.h:566
#define RARRAY_AREF(a, i)
Queries the object stored at index i of the array a.
Definition rarray.h:583
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
Definition rarray.h:69
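The RARRAY_* accessors above are the standard way to walk an Array from C without extra allocation. For example (count_nils is a hypothetical helper):

    static long
    count_nils(VALUE ary)
    {
        long n = 0;
        for (long i = 0; i < RARRAY_LEN(ary); i++) {
            if (NIL_P(RARRAY_AREF(ary, i))) n++;
        }
        return n;
    }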
static VALUE RBASIC_CLASS(VALUE obj)
Queries the class of an object.
Definition rbasic.h:152
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
#define RCLASS_SUPER
Just another name of rb_class_get_superclass.
Definition rclass.h:44
#define DATA_PTR(obj)
Convenient getter macro.
Definition rdata.h:71
#define RHASH_EMPTY_P(h)
Checks if the hash is empty.
Definition rhash.h:92
#define StringValueCStr(v)
Identical to StringValuePtr, except it additionally checks the contents for viability as a C string.
Definition rstring.h:95
#define RUBY_TYPED_DEFAULT_FREE
This is a value you can set to rb_data_type_struct::dfree.
Definition rtypeddata.h:79
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
Definition rtypeddata.h:507
#define TypedData_Wrap_Struct(klass, data_type, sval)
Converts sval, a pointer to your struct, into a Ruby object.
Definition rtypeddata.h:441
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of taking an existing one.
Definition rtypeddata.h:489
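The TypedData trio works together with a static rb_data_type_t descriptor: Wrap/Make put a C struct inside a Ruby object, and Get extracts it back with a type check. A condensed sketch of that pattern; struct point and all names are hypothetical:

    struct point { double x, y; };

    static const rb_data_type_t point_type = {
        "point",
        { 0, RUBY_TYPED_DEFAULT_FREE, 0, },   /* dmark, dfree, dsize */
        0, 0,
        RUBY_TYPED_FREE_IMMEDIATELY,
    };

    static VALUE
    point_alloc(VALUE klass)
    {
        struct point *p;
        return TypedData_Make_Struct(klass, struct point, &point_type, p);
    }

    static VALUE
    point_x(VALUE self)
    {
        struct point *p;
        TypedData_Get_Struct(self, struct point, &point_type, p);
        return DBL2NUM(p->x);
    }

point_alloc would be registered on the class with rb_define_alloc_func(), listed earlier in this index.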
int ruby_native_thread_p(void)
Queries if the thread calling this function is a Ruby thread.
Definition thread.c:5443
int ruby_snprintf(char *str, size_t n, char const *fmt,...)
Our own locale-insensitive version of snprintf(3).
Definition sprintf.c:1045
#define RB_PASS_CALLED_KEYWORDS
Passes keywords if the current method was called with keywords; useful for argument delegation.
Definition scan_args.h:78
Scheduler APIs.
VALUE rb_fiber_scheduler_current(void)
Identical to rb_fiber_scheduler_get(), except it also returns RUBY_Qnil in case of a blocking fiber.
Definition scheduler.c:219
VALUE rb_fiber_scheduler_block(VALUE scheduler, VALUE blocker, VALUE timeout)
Non-blocking wait for the passed "blocker", which is for instance Thread.join or Mutex#lock.
Definition scheduler.c:383
VALUE rb_fiber_scheduler_set(VALUE scheduler)
Destructively assigns the passed scheduler to that of the current thread that is calling this function.
Definition scheduler.c:180
VALUE rb_fiber_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
Wakes up a fiber previously blocked using rb_fiber_scheduler_block().
Definition scheduler.c:402
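These scheduler hooks let blocking primitives cooperate with a non-blocking fiber scheduler when one is installed on the current thread; thread.c consults rb_fiber_scheduler_current() before falling back to native blocking. A simplified sketch of that shape (wait_on is hypothetical; timeout handling in the fallback is omitted):

    static void
    wait_on(VALUE blocker, VALUE timeout)
    {
        VALUE scheduler = rb_fiber_scheduler_current();
        if (scheduler != Qnil) {
            /* park only this fiber, not the whole native thread */
            rb_fiber_scheduler_block(scheduler, blocker, timeout);
        }
        else {
            rb_thread_sleep_forever();   /* classic blocking path */
        }
    }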
int rb_thread_fd_select(int nfds, rb_fdset_t *rfds, rb_fdset_t *wfds, rb_fdset_t *efds, struct timeval *timeout)
Waits for multiple file descriptors at once.
Definition thread.c:4201
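rb_thread_fd_select() is the GVL-aware counterpart of select(2), taking the rb_fdset_t wrappers listed earlier. A sketch waiting for one descriptor to become readable (wait_readable is a hypothetical helper):

    static int
    wait_readable(int fd, struct timeval *timeout)
    {
        rb_fdset_t rfds;
        int ret;

        rb_fd_init(&rfds);
        rb_fd_set(fd, &rfds);
        ret = rb_thread_fd_select(fd + 1, &rfds, NULL, NULL, timeout);
        rb_fd_term(&rfds);                /* release any heap storage */
        return ret;                       /* >0 ready, 0 timed out, <0 error */
    }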
#define rb_fd_resize(n, f)
Does nothing (defined for compatibility).
Definition select.h:43
#define RTEST
This is an old name of RB_TEST.
#define _(args)
This was a transition path from K&R to ANSI.
Definition stdarg.h:35
struct rb_data_type_struct
This is the struct that holds the necessary info for a TypedData struct.
Definition rtypeddata.h:190
rb_fdset_t
The data structure which wraps the fd_set bitmap used by select(2).
Definition largesize.h:71
int maxfd
Maximum allowed number of FDs.
Definition largesize.h:72
fd_set * fdset
File descriptors buffer.
Definition largesize.h:73
int capa
Maximum allowed number of FDs.
Definition win32.h:50
Definition method.h:54
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition method.h:135
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Blocks until the current thread obtains a lock.
Definition thread.c:302
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_lock.
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_unlock.
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Releases a lock.
Definition thread.c:308
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_destroy.
void rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
Fills the passed lock with an initial value.
Definition thread.c:290
void rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
Destroys the passed mutex.
Definition thread.c:296
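Unlike rb_mutex_lock(), the rb_nativethread_lock_* functions wrap the plain OS primitive: they do not release the GVL, check interrupts, or cooperate with the scheduler, so they suit short critical sections around C-level state. A sketch with a hypothetical counter:

    static rb_nativethread_lock_t counter_lock;
    static long counter;

    static void
    counter_setup(void)
    {
        rb_nativethread_lock_initialize(&counter_lock);
    }

    static long
    counter_next(void)
    {
        long v;
        rb_nativethread_lock_lock(&counter_lock);
        v = ++counter;                 /* keep the section short: no Ruby calls here */
        rb_nativethread_lock_unlock(&counter_lock);
        return v;
    }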
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:375