12#include "ruby/internal/config.h"
21#ifdef NEED_MADVICE_PROTOTYPE_USING_CADDR_T
23extern int madvise(caddr_t,
size_t,
int);
28#include "eval_intern.h"
30#include "internal/cont.h"
31#include "internal/thread.h"
32#include "internal/error.h"
33#include "internal/gc.h"
34#include "internal/proc.h"
35#include "internal/sanitizers.h"
36#include "internal/warnings.h"
43#include "ractor_core.h"
/* Enables verbose fprintf tracing in the fiber-pool / continuation code
 * below when set nonzero (see the `if (DEBUG) fprintf(stderr, ...)` sites). */
45static const int DEBUG = 0;

/* System page size in bytes and a mask for rounding an address down to a
 * page boundary. NOTE(review): `pagesize` is defined elsewhere in the file
 * (not visible in this chunk); RB_PAGE_MASK assumes it is a power of two
 * -- confirm against its initialization. */
47#define RB_PAGE_SIZE (pagesize)
48#define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
/* Class/exception handles owned by this file. Presumably assigned during
 * this file's Init_ function (not visible in this chunk) -- verify there. */
52static VALUE rb_cContinuation;
53static VALUE rb_cFiber;
54static VALUE rb_eFiberError;
55#ifdef RB_EXPERIMENTAL_FIBER_POOL
/* Only exposed as a Ruby-visible class when the experimental pool is on. */
56static VALUE rb_cFiberPool;
/* When set, cont_capture/cont_restore_thread snapshot only the live part of
 * the VM stack (slen + clen words) rather than the whole allocation -- see
 * the #ifdef CAPTURE_JUST_VALID_VM_STACK sites below. */
59#define CAPTURE_JUST_VALID_VM_STACK 1

/* Fiber-pool sizing: address-space-limited targets start small and free
 * whole allocations eagerly (FIBER_POOL_ALLOCATION_FREE); other targets use
 * larger defaults. NOTE(review): the #else/#endif lines between these
 * branches are not visible in this chunk (elided extraction) -- the two
 * definition pairs below belong to the two arms of the #ifdef. */
62#ifdef COROUTINE_LIMITED_ADDRESS_SPACE
63#define FIBER_POOL_ALLOCATION_FREE
64#define FIBER_POOL_INITIAL_SIZE 8
65#define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE 32
67#define FIBER_POOL_INITIAL_SIZE 32
68#define FIBER_POOL_ALLOCATION_MAXIMUM_SIZE 1024
/* The experimental pool also frees allocations when fully vacant. */
70#ifdef RB_EXPERIMENTAL_FIBER_POOL
71#define FIBER_POOL_ALLOCATION_FREE
75 CONTINUATION_CONTEXT = 0,
81#ifdef CAPTURE_JUST_VALID_VM_STACK
118#ifdef FIBER_POOL_ALLOCATION_FREE
161#ifdef FIBER_POOL_ALLOCATION_FREE
169#ifdef FIBER_POOL_ALLOCATION_FREE
190 size_t initial_count;
201 size_t vm_stack_size;
206 rb_execution_context_t *ec;
214 enum context_type type;
227 rb_execution_context_t saved_ec;
/* Predicates over rb_fiber_t::status (enum fiber_status). A fiber is
 * "runnable" when it has never been started or is currently suspended;
 * RESUMED means it is the fiber executing now, TERMINATED means its block
 * has returned or raised. */
252#define FIBER_CREATED_P(fiber) ((fiber)->status == FIBER_CREATED)
253#define FIBER_RESUMED_P(fiber) ((fiber)->status == FIBER_RESUMED)
254#define FIBER_SUSPENDED_P(fiber) ((fiber)->status == FIBER_SUSPENDED)
255#define FIBER_TERMINATED_P(fiber) ((fiber)->status == FIBER_TERMINATED)
256#define FIBER_RUNNABLE_P(fiber) (FIBER_CREATED_P(fiber) || FIBER_SUSPENDED_P(fiber))
264 BITFIELD(
enum fiber_status, status, 2);
266 unsigned int yielding : 1;
267 unsigned int blocking : 1;
269 unsigned int killed : 1;
/* Process-wide fiber stack pool, returned by rb_fiber_pool_default(). */
275static struct fiber_pool shared_fiber_pool = {NULL, NULL, 0, 0, 0, 0};

/* Frees the shared pool's allocations at shutdown. NOTE(review): the body
 * of this function is only partially visible in this chunk. */
278rb_free_shared_fiber_pool(
void)
281 while (allocations) {

/* ID cache for Fiber.new keyword arguments (blocking:, pool:, storage:),
 * filled in lazily -- see rb_fiber_initialize_kw below. */
288static ID fiber_initialize_keywords[3] = {0};

/* mmap flags for fiber stacks. MAP_STACK is avoided on FreeBSD; the
 * non-MAP_STACK fallback branch's #else line is not visible here. */
295#if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
296#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
298#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)

/* Human-readable message for the current errno, used in rb_raise calls. */
301#define ERRNOMSG strerror(errno)
305fiber_pool_vacancy_pointer(
void * base,
size_t size)
307 STACK_GROW_DIR_DETECTION;
310 (
char*)base + STACK_DIR_UPPER(0, size - RB_PAGE_SIZE)
314#if defined(COROUTINE_SANITIZE_ADDRESS)
319 STACK_GROW_DIR_DETECTION;
321 return (
char*)stack->base + STACK_DIR_UPPER(RB_PAGE_SIZE, 0);
328 return stack->size - RB_PAGE_SIZE;
336 STACK_GROW_DIR_DETECTION;
338 stack->current = (
char*)stack->base + STACK_DIR_UPPER(0, stack->size);
339 stack->available = stack->size;
346 STACK_GROW_DIR_DETECTION;
348 VM_ASSERT(stack->current);
350 return STACK_DIR_UPPER(stack->current, (
char*)stack->current - stack->available);
358 STACK_GROW_DIR_DETECTION;
360 if (DEBUG) fprintf(stderr,
"fiber_pool_stack_alloca(%p): %"PRIuSIZE
"/%"PRIuSIZE
"\n", (
void*)stack, offset, stack->available);
361 VM_ASSERT(stack->available >= offset);
364 void * pointer = STACK_DIR_UPPER(stack->current, (
char*)stack->current - offset);
367 stack->current = STACK_DIR_UPPER((
char*)stack->current + offset, (
char*)stack->current - offset);
368 stack->available -= offset;
377 fiber_pool_stack_reset(&vacancy->stack);
380 fiber_pool_stack_alloca(&vacancy->stack, RB_PAGE_SIZE);
386 vacancy->next = head;
388#ifdef FIBER_POOL_ALLOCATION_FREE
390 head->previous = vacancy;
391 vacancy->previous = NULL;
398#ifdef FIBER_POOL_ALLOCATION_FREE
403 vacancy->next->previous = vacancy->previous;
406 if (vacancy->previous) {
407 vacancy->previous->next = vacancy->next;
411 vacancy->stack.pool->vacancies = vacancy->next;
416fiber_pool_vacancy_pop(
struct fiber_pool * pool)
421 fiber_pool_vacancy_remove(vacancy);
428fiber_pool_vacancy_pop(
struct fiber_pool * pool)
433 pool->vacancies = vacancy->next;
448 vacancy->stack.base = base;
449 vacancy->stack.size = size;
451 fiber_pool_vacancy_reset(vacancy);
455 return fiber_pool_vacancy_push(vacancy, vacancies);
463fiber_pool_allocate_memory(
size_t * count,
size_t stride)
473 void * base = VirtualAlloc(0, (*count)*stride, MEM_COMMIT, PAGE_READWRITE);
476 *count = (*count) >> 1;
483 size_t mmap_size = (*count)*stride;
484 void * base = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
486 if (base == MAP_FAILED) {
488 *count = (*count) >> 1;
491 ruby_annotate_mmap(base, mmap_size,
"Ruby:fiber_pool_allocate_memory");
492#if defined(MADV_FREE_REUSE)
496 while (madvise(base, mmap_size, MADV_FREE_REUSE) == -1 &&
errno == EAGAIN);
513 STACK_GROW_DIR_DETECTION;
516 size_t stride = size + RB_PAGE_SIZE;
519 void * base = fiber_pool_allocate_memory(&count, stride);
522 rb_raise(rb_eFiberError,
"can't alloc machine stack to fiber (%"PRIuSIZE
" x %"PRIuSIZE
" bytes): %s", count, size, ERRNOMSG);
529 allocation->base = base;
530 allocation->size = size;
531 allocation->stride = stride;
532 allocation->count = count;
533#ifdef FIBER_POOL_ALLOCATION_FREE
534 allocation->used = 0;
539 fprintf(stderr,
"fiber_pool_expand(%"PRIuSIZE
"): %p, %"PRIuSIZE
"/%"PRIuSIZE
" x [%"PRIuSIZE
":%"PRIuSIZE
"]\n",
544 for (
size_t i = 0; i < count; i += 1) {
545 void * base = (
char*)allocation->base + (stride * i);
546 void * page = (
char*)base + STACK_DIR_UPPER(size, 0);
551 if (!VirtualProtect(page, RB_PAGE_SIZE, PAGE_READWRITE | PAGE_GUARD, &old_protect)) {
552 VirtualFree(allocation->base, 0, MEM_RELEASE);
553 rb_raise(rb_eFiberError,
"can't set a guard page: %s", ERRNOMSG);
556 if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
557 munmap(allocation->base, count*stride);
558 rb_raise(rb_eFiberError,
"can't set a guard page: %s", ERRNOMSG);
562 vacancies = fiber_pool_vacancy_initialize(
564 (
char*)base + STACK_DIR_UPPER(0, RB_PAGE_SIZE),
568#ifdef FIBER_POOL_ALLOCATION_FREE
569 vacancies->stack.allocation = allocation;
576#ifdef FIBER_POOL_ALLOCATION_FREE
577 if (allocation->next) {
578 allocation->next->previous = allocation;
581 allocation->previous = NULL;
594fiber_pool_initialize(
struct fiber_pool *
fiber_pool,
size_t size,
size_t count,
size_t vm_stack_size)
596 VM_ASSERT(vm_stack_size < size);
600 fiber_pool->size = ((size / RB_PAGE_SIZE) + 1) * RB_PAGE_SIZE;
611#ifdef FIBER_POOL_ALLOCATION_FREE
616 STACK_GROW_DIR_DETECTION;
618 VM_ASSERT(allocation->used == 0);
620 if (DEBUG) fprintf(stderr,
"fiber_pool_allocation_free: %p base=%p count=%"PRIuSIZE
"\n", (
void*)allocation, allocation->base, allocation->count);
623 for (i = 0; i < allocation->count; i += 1) {
624 void * base = (
char*)allocation->base + (allocation->stride * i) + STACK_DIR_UPPER(0, RB_PAGE_SIZE);
626 struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(base, allocation->size);
629 fiber_pool_vacancy_remove(vacancy);
633 VirtualFree(allocation->base, 0, MEM_RELEASE);
635 munmap(allocation->base, allocation->stride * allocation->count);
638 if (allocation->previous) {
639 allocation->previous->next = allocation->next;
643 allocation->pool->allocations = allocation->next;
646 if (allocation->next) {
647 allocation->next->previous = allocation->previous;
650 allocation->pool->count -= allocation->count;
652 ruby_xfree(allocation);
662 if (DEBUG) fprintf(stderr,
"fiber_pool_stack_acquire: %p used=%"PRIuSIZE
"\n", (
void*)
fiber_pool->vacancies,
fiber_pool->used);
665 const size_t maximum = FIBER_POOL_ALLOCATION_MAXIMUM_SIZE;
666 const size_t minimum =
fiber_pool->initial_count;
669 if (count > maximum) count = maximum;
670 if (count < minimum) count = minimum;
681 VM_ASSERT(vacancy->stack.base);
683#if defined(COROUTINE_SANITIZE_ADDRESS)
684 __asan_unpoison_memory_region(fiber_pool_stack_poison_base(&vacancy->stack), fiber_pool_stack_poison_size(&vacancy->stack));
690#ifdef FIBER_POOL_ALLOCATION_FREE
691 vacancy->stack.allocation->used += 1;
694 fiber_pool_stack_reset(&vacancy->stack);
696 return vacancy->stack;
704 void * base = fiber_pool_stack_base(stack);
705 size_t size = stack->available;
708 VM_ASSERT(size <= (stack->size - RB_PAGE_SIZE));
710 int advice = stack->pool->free_stacks >> 1;
712 if (DEBUG) fprintf(stderr,
"fiber_pool_stack_free: %p+%"PRIuSIZE
" [base=%p, size=%"PRIuSIZE
"] advice=%d\n", base, size, stack->base, stack->size, advice);
725#elif VM_CHECK_MODE > 0 && defined(MADV_DONTNEED)
726 if (!advice) advice = MADV_DONTNEED;
728 madvise(base, size, advice);
729#elif defined(MADV_FREE_REUSABLE)
730 if (!advice) advice = MADV_FREE_REUSABLE;
736 while (madvise(base, size, advice) == -1 &&
errno == EAGAIN);
737#elif defined(MADV_FREE)
738 if (!advice) advice = MADV_FREE;
740 madvise(base, size, advice);
741#elif defined(MADV_DONTNEED)
742 if (!advice) advice = MADV_DONTNEED;
744 madvise(base, size, advice);
745#elif defined(POSIX_MADV_DONTNEED)
746 if (!advice) advice = POSIX_MADV_DONTNEED;
748 posix_madvise(base, size, advice);
750 VirtualAlloc(base, size, MEM_RESET, PAGE_READWRITE);
755#if defined(COROUTINE_SANITIZE_ADDRESS)
756 __asan_poison_memory_region(fiber_pool_stack_poison_base(stack), fiber_pool_stack_poison_size(stack));
765 struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(stack->base, stack->size);
767 if (DEBUG) fprintf(stderr,
"fiber_pool_stack_release: %p used=%"PRIuSIZE
"\n", stack->base, stack->pool->used);
770 vacancy->stack = *stack;
774 fiber_pool_vacancy_reset(vacancy);
777 pool->vacancies = fiber_pool_vacancy_push(vacancy, pool->vacancies);
780#ifdef FIBER_POOL_ALLOCATION_FREE
783 allocation->used -= 1;
786 if (allocation->used == 0) {
787 fiber_pool_allocation_free(allocation);
789 else if (stack->pool->free_stacks) {
790 fiber_pool_stack_free(&vacancy->stack);
795 if (stack->pool->free_stacks) {
796 fiber_pool_stack_free(&vacancy->stack);
802ec_switch(rb_thread_t *th, rb_fiber_t *fiber)
804 rb_execution_context_t *ec = &fiber->cont.saved_ec;
805#ifdef RUBY_ASAN_ENABLED
806 ec->machine.asan_fake_stack_handle = asan_get_thread_fake_stack_handle();
808 rb_ractor_set_current_ec(th->ractor, th->ec = ec);
815 if (th->vm->ractor.main_thread == th &&
816 rb_signal_buff_size() > 0) {
817 RUBY_VM_SET_TRAP_INTERRUPT(ec);
820 VM_ASSERT(ec->fiber_ptr->cont.self == 0 || ec->vm_stack != NULL);
824fiber_restore_thread(rb_thread_t *th, rb_fiber_t *fiber)
826 ec_switch(th, fiber);
827 VM_ASSERT(th->ec->fiber_ptr == fiber);
830#ifndef COROUTINE_DECL
831# define COROUTINE_DECL COROUTINE
837 rb_fiber_t *fiber = to->argument;
839#if defined(COROUTINE_SANITIZE_ADDRESS)
849 __sanitizer_finish_switch_fiber(to->fake_stack, (
const void**)&from->stack_base, &from->stack_size);
852 rb_thread_t *thread = fiber->cont.saved_ec.thread_ptr;
854#ifdef COROUTINE_PTHREAD_CONTEXT
855 ruby_thread_set_native(thread);
858 fiber_restore_thread(thread, fiber);
860 rb_fiber_start(fiber);
862#ifndef COROUTINE_PTHREAD_CONTEXT
863 VM_UNREACHABLE(fiber_entry);
869fiber_initialize_coroutine(rb_fiber_t *fiber,
size_t * vm_stack_size)
872 rb_execution_context_t *sec = &fiber->cont.saved_ec;
873 void * vm_stack = NULL;
877 fiber->stack = fiber_pool_stack_acquire(
fiber_pool);
878 vm_stack = fiber_pool_stack_alloca(&fiber->stack,
fiber_pool->vm_stack_size);
881 coroutine_initialize(&fiber->context, fiber_entry, fiber_pool_stack_base(&fiber->stack), fiber->stack.available);
884 sec->machine.stack_start = fiber->stack.current;
885 sec->machine.stack_maxsize = fiber->stack.available;
887 fiber->context.argument = (
void*)fiber;
895fiber_stack_release(rb_fiber_t * fiber)
897 rb_execution_context_t *ec = &fiber->cont.saved_ec;
899 if (DEBUG) fprintf(stderr,
"fiber_stack_release: %p, stack.base=%p\n", (
void*)fiber, fiber->stack.base);
902 if (fiber->stack.base) {
903 fiber_pool_stack_release(&fiber->stack);
904 fiber->stack.base = NULL;
908 rb_ec_clear_vm_stack(ec);
912fiber_status_name(
enum fiber_status s)
915 case FIBER_CREATED:
return "created";
916 case FIBER_RESUMED:
return "resumed";
917 case FIBER_SUSPENDED:
return "suspended";
918 case FIBER_TERMINATED:
return "terminated";
920 VM_UNREACHABLE(fiber_status_name);
925fiber_verify(
const rb_fiber_t *fiber)
928 VM_ASSERT(fiber->cont.saved_ec.fiber_ptr == fiber);
930 switch (fiber->status) {
932 VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
934 case FIBER_SUSPENDED:
935 VM_ASSERT(fiber->cont.saved_ec.vm_stack != NULL);
938 case FIBER_TERMINATED:
942 VM_UNREACHABLE(fiber_verify);
948fiber_status_set(rb_fiber_t *fiber,
enum fiber_status s)
951 VM_ASSERT(!FIBER_TERMINATED_P(fiber));
952 VM_ASSERT(fiber->status != s);
973 if (!fiber) rb_raise(rb_eFiberError,
"uninitialized fiber");
978NOINLINE(
static VALUE cont_capture(
volatile int *
volatile stat));
980#define THREAD_MUST_BE_RUNNING(th) do { \
981 if (!(th)->ec->tag) rb_raise(rb_eThreadError, "not running thread"); \
985rb_fiber_threadptr(
const rb_fiber_t *fiber)
987 return fiber->cont.saved_ec.thread_ptr;
991cont_thread_value(
const rb_context_t *cont)
993 return cont->saved_ec.thread_ptr->self;
997cont_compact(
void *ptr)
999 rb_context_t *cont = ptr;
1002 cont->self = rb_gc_location(cont->self);
1004 cont->value = rb_gc_location(cont->value);
1005 rb_execution_context_update(&cont->saved_ec);
1011 rb_context_t *cont = ptr;
1013 RUBY_MARK_ENTER(
"cont");
1015 rb_gc_mark_movable(cont->self);
1017 rb_gc_mark_movable(cont->value);
1019 rb_execution_context_mark(&cont->saved_ec);
1020 rb_gc_mark(cont_thread_value(cont));
1022 if (cont->saved_vm_stack.ptr) {
1023#ifdef CAPTURE_JUST_VALID_VM_STACK
1024 rb_gc_mark_locations(cont->saved_vm_stack.ptr,
1025 cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
1027 rb_gc_mark_locations(cont->saved_vm_stack.ptr,
1028 cont->saved_vm_stack.ptr, cont->saved_ec.stack_size);
1032 if (cont->machine.stack) {
1033 if (cont->type == CONTINUATION_CONTEXT) {
1035 rb_gc_mark_locations(cont->machine.stack,
1036 cont->machine.stack + cont->machine.stack_size);
1044 RUBY_MARK_LEAVE(
"cont");
1049fiber_is_root_p(
const rb_fiber_t *fiber)
1051 return fiber == fiber->cont.saved_ec.thread_ptr->root_fiber;
1055static void jit_cont_free(
struct rb_jit_cont *cont);
1060 rb_context_t *cont = ptr;
1062 RUBY_FREE_ENTER(
"cont");
1064 if (cont->type == CONTINUATION_CONTEXT) {
1065 ruby_xfree(cont->saved_ec.vm_stack);
1066 RUBY_FREE_UNLESS_NULL(cont->machine.stack);
1069 rb_fiber_t *fiber = (rb_fiber_t*)cont;
1070 coroutine_destroy(&fiber->context);
1071 fiber_stack_release(fiber);
1074 RUBY_FREE_UNLESS_NULL(cont->saved_vm_stack.ptr);
1076 VM_ASSERT(cont->jit_cont != NULL);
1077 jit_cont_free(cont->jit_cont);
1080 RUBY_FREE_LEAVE(
"cont");
1084cont_memsize(
const void *ptr)
1086 const rb_context_t *cont = ptr;
1089 size =
sizeof(*cont);
1090 if (cont->saved_vm_stack.ptr) {
1091#ifdef CAPTURE_JUST_VALID_VM_STACK
1092 size_t n = (cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
1094 size_t n = cont->saved_ec.vm_stack_size;
1096 size += n *
sizeof(*cont->saved_vm_stack.ptr);
1099 if (cont->machine.stack) {
1100 size += cont->machine.stack_size *
sizeof(*cont->machine.stack);
1107rb_fiber_update_self(rb_fiber_t *fiber)
1109 if (fiber->cont.self) {
1110 fiber->cont.self = rb_gc_location(fiber->cont.self);
1113 rb_execution_context_update(&fiber->cont.saved_ec);
1118rb_fiber_mark_self(
const rb_fiber_t *fiber)
1120 if (fiber->cont.self) {
1121 rb_gc_mark_movable(fiber->cont.self);
1124 rb_execution_context_mark(&fiber->cont.saved_ec);
1129fiber_compact(
void *ptr)
1131 rb_fiber_t *fiber = ptr;
1132 fiber->first_proc = rb_gc_location(fiber->first_proc);
1134 if (fiber->prev) rb_fiber_update_self(fiber->prev);
1136 cont_compact(&fiber->cont);
1137 fiber_verify(fiber);
1141fiber_mark(
void *ptr)
1143 rb_fiber_t *fiber = ptr;
1144 RUBY_MARK_ENTER(
"cont");
1145 fiber_verify(fiber);
1146 rb_gc_mark_movable(fiber->first_proc);
1147 if (fiber->prev) rb_fiber_mark_self(fiber->prev);
1148 cont_mark(&fiber->cont);
1149 RUBY_MARK_LEAVE(
"cont");
1153fiber_free(
void *ptr)
1155 rb_fiber_t *fiber = ptr;
1156 RUBY_FREE_ENTER(
"fiber");
1158 if (DEBUG) fprintf(stderr,
"fiber_free: %p[%p]\n", (
void *)fiber, fiber->stack.base);
1160 if (fiber->cont.saved_ec.local_storage) {
1161 rb_id_table_free(fiber->cont.saved_ec.local_storage);
1164 cont_free(&fiber->cont);
1165 RUBY_FREE_LEAVE(
"fiber");
1169fiber_memsize(
const void *ptr)
1171 const rb_fiber_t *fiber = ptr;
1172 size_t size =
sizeof(*fiber);
1173 const rb_execution_context_t *saved_ec = &fiber->cont.saved_ec;
1174 const rb_thread_t *th = rb_ec_thread_ptr(saved_ec);
1179 if (saved_ec->local_storage && fiber != th->root_fiber) {
1180 size += rb_id_table_memsize(saved_ec->local_storage);
1181 size += rb_obj_memsize_of(saved_ec->storage);
1184 size += cont_memsize(&fiber->cont);
1195cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
1199 SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
1201 if (th->ec->machine.stack_start > th->ec->machine.stack_end) {
1202 size = cont->machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
1203 cont->machine.stack_src = th->ec->machine.stack_end;
1206 size = cont->machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
1207 cont->machine.stack_src = th->ec->machine.stack_start;
1210 if (cont->machine.stack) {
1217 FLUSH_REGISTER_WINDOWS;
1218 asan_unpoison_memory_region(cont->machine.stack_src, size,
false);
1219 MEMCPY(cont->machine.stack, cont->machine.stack_src,
VALUE, size);
1224 {cont_mark, cont_free, cont_memsize, cont_compact},
1225 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
1229cont_save_thread(rb_context_t *cont, rb_thread_t *th)
1231 rb_execution_context_t *sec = &cont->saved_ec;
1233 VM_ASSERT(th->status == THREAD_RUNNABLE);
1240 sec->machine.stack_end = NULL;
1243static rb_nativethread_lock_t jit_cont_lock;
1248jit_cont_new(rb_execution_context_t *ec)
1261 if (first_jit_cont == NULL) {
1262 cont->next = cont->prev = NULL;
1266 cont->next = first_jit_cont;
1267 first_jit_cont->prev = cont;
1269 first_jit_cont = cont;
1282 if (cont == first_jit_cont) {
1283 first_jit_cont = cont->next;
1284 if (first_jit_cont != NULL)
1285 first_jit_cont->prev = NULL;
1288 cont->prev->next = cont->next;
1289 if (cont->next != NULL)
1290 cont->next->prev = cont->prev;
1299rb_jit_cont_each_iseq(rb_iseq_callback callback,
void *data)
1302 for (cont = first_jit_cont; cont != NULL; cont = cont->next) {
1303 if (cont->ec->vm_stack == NULL)
1306 const rb_control_frame_t *cfp = cont->ec->cfp;
1307 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(cont->ec, cfp)) {
1308 if (cfp->pc && cfp->iseq && imemo_type((
VALUE)cfp->iseq) == imemo_iseq) {
1309 callback(cfp->iseq, data);
1311 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1320rb_yjit_cancel_jit_return(
void *leave_exit,
void *leave_exception)
1323 for (cont = first_jit_cont; cont != NULL; cont = cont->next) {
1324 if (cont->ec->vm_stack == NULL)
1327 const rb_control_frame_t *cfp = cont->ec->cfp;
1328 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(cont->ec, cfp)) {
1329 if (cfp->jit_return && cfp->jit_return != leave_exception) {
1330 ((rb_control_frame_t *)cfp)->jit_return = leave_exit;
1332 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1340rb_jit_cont_finish(
void)
1343 for (cont = first_jit_cont; cont != NULL; cont = next) {
1351cont_init_jit_cont(rb_context_t *cont)
1353 VM_ASSERT(cont->jit_cont == NULL);
1355 cont->jit_cont = jit_cont_new(&(cont->saved_ec));
1361 return &fiber->cont.saved_ec;
1365cont_init(rb_context_t *cont, rb_thread_t *th)
1368 cont_save_thread(cont, th);
1369 cont->saved_ec.thread_ptr = th;
1370 cont->saved_ec.local_storage = NULL;
1371 cont->saved_ec.local_storage_recursive_hash =
Qnil;
1372 cont->saved_ec.local_storage_recursive_hash_for_trace =
Qnil;
1373 cont_init_jit_cont(cont);
1376static rb_context_t *
1377cont_new(
VALUE klass)
1380 volatile VALUE contval;
1381 rb_thread_t *th = GET_THREAD();
1383 THREAD_MUST_BE_RUNNING(th);
1385 cont->self = contval;
1386 cont_init(cont, th);
1393 return fiber->cont.self;
1399 return fiber->blocking;
1404rb_jit_cont_init(
void)
1411show_vm_stack(
const rb_execution_context_t *ec)
1413 VALUE *p = ec->vm_stack;
1414 while (p < ec->cfp->sp) {
1415 fprintf(stderr,
"%3d ", (
int)(p - ec->vm_stack));
1416 rb_obj_info_dump(*p);
1422show_vm_pcs(
const rb_control_frame_t *cfp,
1423 const rb_control_frame_t *end_of_cfp)
1426 while (cfp != end_of_cfp) {
1429 pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded;
1431 fprintf(stderr,
"%2d pc: %d\n", i++, pc);
1432 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1438cont_capture(
volatile int *
volatile stat)
1440 rb_context_t *
volatile cont;
1441 rb_thread_t *th = GET_THREAD();
1442 volatile VALUE contval;
1443 const rb_execution_context_t *ec = th->ec;
1445 THREAD_MUST_BE_RUNNING(th);
1446 rb_vm_stack_to_heap(th->ec);
1447 cont = cont_new(rb_cContinuation);
1448 contval = cont->self;
1450#ifdef CAPTURE_JUST_VALID_VM_STACK
1451 cont->saved_vm_stack.slen = ec->cfp->sp - ec->vm_stack;
1452 cont->saved_vm_stack.clen = ec->vm_stack + ec->vm_stack_size - (
VALUE*)ec->cfp;
1453 cont->saved_vm_stack.ptr =
ALLOC_N(
VALUE, cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
1454 MEMCPY(cont->saved_vm_stack.ptr,
1456 VALUE, cont->saved_vm_stack.slen);
1457 MEMCPY(cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
1460 cont->saved_vm_stack.clen);
1462 cont->saved_vm_stack.ptr =
ALLOC_N(
VALUE, ec->vm_stack_size);
1463 MEMCPY(cont->saved_vm_stack.ptr, ec->vm_stack,
VALUE, ec->vm_stack_size);
1466 rb_ec_set_vm_stack(&cont->saved_ec, NULL, 0);
1467 VM_ASSERT(cont->saved_ec.cfp != NULL);
1468 cont_save_machine_stack(th, cont);
1470 if (ruby_setjmp(cont->jmpbuf)) {
1473 VAR_INITIALIZED(cont);
1474 value = cont->value;
1475 if (cont->argc == -1) rb_exc_raise(value);
1487cont_restore_thread(rb_context_t *cont)
1489 rb_thread_t *th = GET_THREAD();
1492 if (cont->type == CONTINUATION_CONTEXT) {
1494 rb_execution_context_t *sec = &cont->saved_ec;
1495 rb_fiber_t *fiber = NULL;
1497 if (sec->fiber_ptr != NULL) {
1498 fiber = sec->fiber_ptr;
1500 else if (th->root_fiber) {
1501 fiber = th->root_fiber;
1504 if (fiber && th->ec != &fiber->cont.saved_ec) {
1505 ec_switch(th, fiber);
1508 if (th->ec->trace_arg != sec->trace_arg) {
1513#ifdef CAPTURE_JUST_VALID_VM_STACK
1515 cont->saved_vm_stack.ptr,
1516 VALUE, cont->saved_vm_stack.slen);
1517 MEMCPY(th->ec->vm_stack + th->ec->vm_stack_size - cont->saved_vm_stack.clen,
1518 cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen,
1519 VALUE, cont->saved_vm_stack.clen);
1521 MEMCPY(th->ec->vm_stack, cont->saved_vm_stack.ptr,
VALUE, sec->vm_stack_size);
1525 th->ec->cfp = sec->cfp;
1526 th->ec->raised_flag = sec->raised_flag;
1527 th->ec->tag = sec->tag;
1528 th->ec->root_lep = sec->root_lep;
1529 th->ec->root_svar = sec->root_svar;
1530 th->ec->errinfo = sec->errinfo;
1532 VM_ASSERT(th->ec->vm_stack != NULL);
1536 fiber_restore_thread(th, (rb_fiber_t*)cont);
1540NOINLINE(
static void fiber_setcontext(rb_fiber_t *new_fiber, rb_fiber_t *old_fiber));
1543fiber_setcontext(rb_fiber_t *new_fiber, rb_fiber_t *old_fiber)
1545 rb_thread_t *th = GET_THREAD();
1548 if (!FIBER_TERMINATED_P(old_fiber)) {
1549 STACK_GROW_DIR_DETECTION;
1550 SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
1551 if (STACK_DIR_UPPER(0, 1)) {
1552 old_fiber->cont.machine.stack_size = th->ec->machine.stack_start - th->ec->machine.stack_end;
1553 old_fiber->cont.machine.stack = th->ec->machine.stack_end;
1556 old_fiber->cont.machine.stack_size = th->ec->machine.stack_end - th->ec->machine.stack_start;
1557 old_fiber->cont.machine.stack = th->ec->machine.stack_start;
1562 old_fiber->cont.saved_ec.machine.stack_start = th->ec->machine.stack_start;
1563 old_fiber->cont.saved_ec.machine.stack_end = FIBER_TERMINATED_P(old_fiber) ? NULL : th->ec->machine.stack_end;
1568#if defined(COROUTINE_SANITIZE_ADDRESS)
1569 __sanitizer_start_switch_fiber(FIBER_TERMINATED_P(old_fiber) ? NULL : &old_fiber->context.fake_stack, new_fiber->context.stack_base, new_fiber->context.stack_size);
1573 struct coroutine_context * from = coroutine_transfer(&old_fiber->context, &new_fiber->context);
1575#if defined(COROUTINE_SANITIZE_ADDRESS)
1576 __sanitizer_finish_switch_fiber(old_fiber->context.fake_stack, NULL, NULL);
1584 fiber_restore_thread(th, old_fiber);
1590NOINLINE(NORETURN(
static void cont_restore_1(rb_context_t *)));
1593cont_restore_1(rb_context_t *cont)
1595 cont_restore_thread(cont);
1598#if defined(_M_AMD64) && !defined(__MINGW64__)
1603 _JUMP_BUFFER *bp = (
void*)&cont->jmpbuf;
1604 bp->Frame = ((_JUMP_BUFFER*)((
void*)&buf))->Frame;
1607 if (cont->machine.stack_src) {
1608 FLUSH_REGISTER_WINDOWS;
1609 MEMCPY(cont->machine.stack_src, cont->machine.stack,
1610 VALUE, cont->machine.stack_size);
1613 ruby_longjmp(cont->jmpbuf, 1);
1616NORETURN(NOINLINE(
static void cont_restore_0(rb_context_t *,
VALUE *)));
1619cont_restore_0(rb_context_t *cont,
VALUE *addr_in_prev_frame)
1621 if (cont->machine.stack_src) {
1623#define STACK_PAD_SIZE 1
1625#define STACK_PAD_SIZE 1024
1627 VALUE space[STACK_PAD_SIZE];
1629#if !STACK_GROW_DIRECTION
1630 if (addr_in_prev_frame > &space[0]) {
1633#if STACK_GROW_DIRECTION <= 0
1634 volatile VALUE *
const end = cont->machine.stack_src;
1635 if (&space[0] > end) {
1644 cont_restore_0(cont, &space[0]);
1648#if !STACK_GROW_DIRECTION
1653#if STACK_GROW_DIRECTION >= 0
1654 volatile VALUE *
const end = cont->machine.stack_src + cont->machine.stack_size;
1655 if (&space[STACK_PAD_SIZE] < end) {
1660 cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
1664#if !STACK_GROW_DIRECTION
1668 cont_restore_1(cont);
1755rb_callcc(
VALUE self)
1757 volatile int called;
1758 volatile VALUE val = cont_capture(&called);
1767#ifdef RUBY_ASAN_ENABLED
1770MAYBE_UNUSED(
static void notusing_callcc(
void)) { rb_callcc(
Qnil); }
1771# define rb_callcc rb_f_notimplement
1776make_passing_arg(
int argc,
const VALUE *argv)
1792NORETURN(
static VALUE rb_cont_call(
int argc,
VALUE *argv,
VALUE contval));
1810rb_cont_call(
int argc,
VALUE *argv,
VALUE contval)
1812 rb_context_t *cont = cont_ptr(contval);
1813 rb_thread_t *th = GET_THREAD();
1815 if (cont_thread_value(cont) != th->self) {
1818 if (cont->saved_ec.fiber_ptr) {
1819 if (th->ec->fiber_ptr != cont->saved_ec.fiber_ptr) {
1825 cont->value = make_passing_arg(argc, argv);
1827 cont_restore_0(cont, &contval);
1920 {fiber_mark, fiber_free, fiber_memsize, fiber_compact,},
1921 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
1925fiber_alloc(
VALUE klass)
1931fiber_t_alloc(
VALUE fiber_value,
unsigned int blocking)
1934 rb_thread_t *th = GET_THREAD();
1940 THREAD_MUST_BE_RUNNING(th);
1941 fiber =
ZALLOC(rb_fiber_t);
1942 fiber->cont.self = fiber_value;
1943 fiber->cont.type = FIBER_CONTEXT;
1944 fiber->blocking = blocking;
1946 cont_init(&fiber->cont, th);
1948 fiber->cont.saved_ec.fiber_ptr = fiber;
1949 rb_ec_clear_vm_stack(&fiber->cont.saved_ec);
1955 VM_ASSERT(FIBER_CREATED_P(fiber));
1963root_fiber_alloc(rb_thread_t *th)
1965 VALUE fiber_value = fiber_alloc(rb_cFiber);
1966 rb_fiber_t *fiber = th->ec->fiber_ptr;
1968 VM_ASSERT(
DATA_PTR(fiber_value) == NULL);
1969 VM_ASSERT(fiber->cont.type == FIBER_CONTEXT);
1970 VM_ASSERT(FIBER_RESUMED_P(fiber));
1972 th->root_fiber = fiber;
1974 fiber->cont.self = fiber_value;
1976 coroutine_initialize_main(&fiber->context);
1981static inline rb_fiber_t*
1984 rb_execution_context_t *ec = GET_EC();
1985 if (ec->fiber_ptr->cont.self == 0) {
1986 root_fiber_alloc(rb_ec_thread_ptr(ec));
1988 return ec->fiber_ptr;
1992current_fiber_storage(
void)
1994 rb_execution_context_t *ec = GET_EC();
1999inherit_fiber_storage(
void)
2007 fiber->cont.saved_ec.storage = storage;
2011fiber_storage_get(rb_fiber_t *fiber,
int allocate)
2013 VALUE storage = fiber->cont.saved_ec.storage;
2014 if (storage ==
Qnil && allocate) {
2016 fiber_storage_set(fiber, storage);
2022storage_access_must_be_from_same_fiber(
VALUE self)
2024 rb_fiber_t *fiber = fiber_ptr(self);
2025 rb_fiber_t *current = fiber_current();
2026 if (fiber != current) {
2027 rb_raise(rb_eArgError,
"Fiber storage can only be accessed from the Fiber it belongs to");
2038rb_fiber_storage_get(
VALUE self)
2040 storage_access_must_be_from_same_fiber(self);
2042 VALUE storage = fiber_storage_get(fiber_ptr(self), FALSE);
2044 if (storage ==
Qnil) {
2061fiber_storage_validate(
VALUE value)
2064 if (value ==
Qnil)
return;
2104 "Fiber#storage= is experimental and may be removed in the future!");
2107 storage_access_must_be_from_same_fiber(self);
2108 fiber_storage_validate(value);
2110 fiber_ptr(self)->cont.saved_ec.storage =
rb_obj_dup(value);
2129 VALUE storage = fiber_storage_get(fiber_current(), FALSE);
2132 return rb_hash_aref(storage, key);
2150 VALUE storage = fiber_storage_get(fiber_current(), value !=
Qnil);
2153 if (value ==
Qnil) {
2154 return rb_hash_delete(storage, key);
2157 return rb_hash_aset(storage, key, value);
2166 storage = inherit_fiber_storage();
2169 fiber_storage_validate(storage);
2173 rb_fiber_t *fiber = fiber_t_alloc(self, blocking);
2175 fiber->cont.saved_ec.storage = storage;
2176 fiber->first_proc = proc;
2177 fiber->stack.base = NULL;
2184fiber_prepare_stack(rb_fiber_t *fiber)
2186 rb_context_t *cont = &fiber->cont;
2187 rb_execution_context_t *sec = &cont->saved_ec;
2189 size_t vm_stack_size = 0;
2190 VALUE *vm_stack = fiber_initialize_coroutine(fiber, &vm_stack_size);
2193 cont->saved_vm_stack.ptr = NULL;
2194 rb_ec_initialize_vm_stack(sec, vm_stack, vm_stack_size /
sizeof(
VALUE));
2197 sec->local_storage = NULL;
2198 sec->local_storage_recursive_hash =
Qnil;
2199 sec->local_storage_recursive_hash_for_trace =
Qnil;
2203rb_fiber_pool_default(
VALUE pool)
2205 return &shared_fiber_pool;
2211 fiber->cont.saved_ec.storage = storage;
2217rb_fiber_initialize_kw(
int argc,
VALUE* argv,
VALUE self,
int kw_splat)
2228 rb_get_kwargs(options, fiber_initialize_keywords, 0, 3, arguments);
2230 if (!UNDEF_P(arguments[0])) {
2231 blocking = arguments[0];
2234 if (!UNDEF_P(arguments[1])) {
2235 pool = arguments[1];
2238 storage = arguments[2];
2241 return fiber_initialize(self,
rb_block_proc(), rb_fiber_pool_default(pool),
RTEST(blocking), storage);
2294rb_fiber_initialize(
int argc,
VALUE* argv,
VALUE self)
2302 return fiber_initialize(fiber_alloc(rb_cFiber),
rb_proc_new(func, obj), rb_fiber_pool_default(
Qnil), 0, storage);
2308 return rb_fiber_new_storage(func, obj,
Qtrue);
2312rb_fiber_s_schedule_kw(
int argc,
VALUE* argv,
int kw_splat)
2314 rb_thread_t * th = GET_THREAD();
2315 VALUE scheduler = th->scheduler;
2318 if (scheduler !=
Qnil) {
2370rb_fiber_s_schedule(
int argc,
VALUE *argv,
VALUE obj)
2386rb_fiber_s_scheduler(
VALUE klass)
2400rb_fiber_current_scheduler(
VALUE klass)
2422rb_fiber_set_scheduler(
VALUE klass,
VALUE scheduler)
2427NORETURN(
static void rb_fiber_terminate(rb_fiber_t *fiber,
int need_interrupt,
VALUE err));
2430rb_fiber_start(rb_fiber_t *fiber)
2432 rb_thread_t *
volatile th = fiber->cont.saved_ec.thread_ptr;
2435 enum ruby_tag_type state;
2437 VM_ASSERT(th->ec == GET_EC());
2438 VM_ASSERT(FIBER_RESUMED_P(fiber));
2440 if (fiber->blocking) {
2444 EC_PUSH_TAG(th->ec);
2445 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2446 rb_context_t *cont = &VAR_FROM_MEMORY(fiber)->cont;
2448 const VALUE *argv, args = cont->value;
2449 GetProcPtr(fiber->first_proc, proc);
2452 th->ec->errinfo =
Qnil;
2453 th->ec->root_lep = rb_vm_proc_local_ep(fiber->first_proc);
2454 th->ec->root_svar =
Qfalse;
2457 cont->value = rb_vm_invoke_proc(th->ec, proc, argc, argv, cont->kw_splat, VM_BLOCK_HANDLER_NONE);
2461 int need_interrupt = TRUE;
2464 err = th->ec->errinfo;
2465 VM_ASSERT(FIBER_RESUMED_P(fiber));
2467 if (state == TAG_RAISE) {
2470 else if (state == TAG_FATAL && err == RUBY_FATAL_FIBER_KILLED) {
2471 need_interrupt = FALSE;
2474 else if (state == TAG_FATAL) {
2475 rb_threadptr_pending_interrupt_enque(th, err);
2478 err = rb_vm_make_jump_tag_but_local_jump(state, err);
2482 rb_fiber_terminate(fiber, need_interrupt, err);
2487rb_threadptr_root_fiber_setup(rb_thread_t *th)
2489 rb_fiber_t *fiber = ruby_mimcalloc(1,
sizeof(rb_fiber_t));
2491 rb_bug(
"%s", strerror(
errno));
2493 fiber->cont.type = FIBER_CONTEXT;
2494 fiber->cont.saved_ec.fiber_ptr = fiber;
2495 fiber->cont.saved_ec.thread_ptr = th;
2496 fiber->blocking = 1;
2498 fiber_status_set(fiber, FIBER_RESUMED);
2499 th->ec = &fiber->cont.saved_ec;
2500 cont_init_jit_cont(&fiber->cont);
2504rb_threadptr_root_fiber_release(rb_thread_t *th)
2506 if (th->root_fiber) {
2510 rb_execution_context_t *ec = rb_current_execution_context(
false);
2512 VM_ASSERT(th->ec->fiber_ptr->cont.type == FIBER_CONTEXT);
2513 VM_ASSERT(th->ec->fiber_ptr->cont.self == 0);
2515 if (ec && th->ec == ec) {
2516 rb_ractor_set_current_ec(th->ractor, NULL);
2518 fiber_free(th->ec->fiber_ptr);
2524rb_threadptr_root_fiber_terminate(rb_thread_t *th)
2526 rb_fiber_t *fiber = th->ec->fiber_ptr;
2528 fiber->status = FIBER_TERMINATED;
2531 rb_ec_clear_vm_stack(th->ec);
// Pick the fiber that control should return to when the current fiber
// yields (or terminates, when `terminate` is true).  Normally that is the
// fiber recorded in `prev` (set by resume); if there is none, yielding is an
// error unless we are terminating, in which case control is handed to the
// deepest fiber on the root fiber's resuming chain.
// NOTE(review): several branch lines are elided in this listing — the
// control flow between the fragments below is inferred, confirm upstream.
 2534static inline rb_fiber_t*
 2535return_fiber(
bool terminate)
 2537 rb_fiber_t *fiber = fiber_current();
 2538 rb_fiber_t *prev = fiber->prev;
// Returning to the resumer: break the resume link so it can be resumed again.
 2542 prev->resuming_fiber = NULL;
// No `prev` and not terminating: Fiber.yield outside of resume is an error.
 2547 rb_raise(rb_eFiberError,
"attempt to yield on a not resumed fiber");
 2550 rb_thread_t *th = GET_THREAD();
 2551 rb_fiber_t *root_fiber = th->root_fiber;
 2553 VM_ASSERT(root_fiber != NULL);
// Walk the resume chain from the root to find the innermost resuming fiber.
 2556 for (fiber = root_fiber; fiber->resuming_fiber; fiber = fiber->resuming_fiber) {
// Fragment of rb_fiber_current() (its header is elided in this listing):
// returns the Ruby-level Fiber object wrapping the currently running fiber.
 2566 return fiber_current()->cont.self;
// Perform the actual switch from the thread's current fiber to `next_fiber`:
// resolve (or lazily allocate) the current fiber, prepare next_fiber's stack
// on first use, update both fibers' statuses, and jump via fiber_setcontext.
 2571fiber_store(rb_fiber_t *next_fiber, rb_thread_t *th)
 2575 if (th->ec->fiber_ptr != NULL) {
 2576 fiber = th->ec->fiber_ptr;
// No fiber yet on this thread (elided else-branch): create the root fiber.
 2580 fiber = root_fiber_alloc(th);
// First switch into a created-but-never-run fiber: set up its stacks now.
 2583 if (FIBER_CREATED_P(next_fiber)) {
 2584 fiber_prepare_stack(next_fiber);
 2587 VM_ASSERT(FIBER_RESUMED_P(fiber) || FIBER_TERMINATED_P(fiber));
 2588 VM_ASSERT(FIBER_RUNNABLE_P(next_fiber));
// The outgoing fiber (unless already terminated) becomes suspended.
 2590 if (FIBER_RESUMED_P(fiber)) fiber_status_set(fiber, FIBER_SUSPENDED);
 2592 fiber_status_set(next_fiber, FIBER_RESUMED);
 2593 fiber_setcontext(next_fiber, fiber);
// If the (current) fiber has been flagged as killed, unwind it by setting
// the sentinel errinfo RUBY_FATAL_FIBER_KILLED and jumping with TAG_FATAL.
// fiber_entry recognizes that sentinel and suppresses the usual interrupt
// propagation (see the TAG_FATAL handling around original line 2470).
 2597fiber_check_killed(rb_fiber_t *fiber)
 2599 VM_ASSERT(fiber == fiber_current());
 2601 if (fiber->killed) {
 2602 rb_thread_t *thread = fiber->cont.saved_ec.thread_ptr;
 2604 thread->ec->errinfo = RUBY_FATAL_FIBER_KILLED;
 2605 EC_JUMP_TAG(thread->ec, RUBY_TAG_FATAL);
// Core fiber-switching primitive used by resume / transfer / yield.
// Validates the target, records resume/yield bookkeeping, publishes the
// arguments through cont->value, switches contexts via fiber_store, and on
// return re-checks interrupts and re-raises any exception passed across the
// switch (argc == -1 is the "value is an exception" convention, see
// rb_fiber_terminate).
// NOTE(review): many lines are elided in this listing; comments on the
// fragments below describe the visible statements only.
 2610fiber_switch(rb_fiber_t *fiber,
int argc,
const VALUE *argv,
int kw_splat, rb_fiber_t *resuming_fiber,
bool yielding)
 2613 rb_context_t *cont = &fiber->cont;
 2614 rb_thread_t *th = GET_THREAD();
 2617 if (th->root_fiber == NULL) root_fiber_alloc(th);
// Switching to the already-current fiber is a no-op: just pass the args back.
 2619 if (th->ec->fiber_ptr == fiber) {
 2623 return make_passing_arg(argc, argv);
// Fibers are thread-affine: reject cross-thread switches.
 2626 if (cont_thread_value(cont) != th->self) {
 2627 rb_raise(rb_eFiberError,
"fiber called across threads");
// Switching to a dead fiber raises FiberError; if the *current* fiber is
// itself terminated, the error is instead propagated to the root fiber
// by storing it in cont->value and switching there (see fragments below).
 2630 if (FIBER_TERMINATED_P(fiber)) {
 2631 value =
rb_exc_new2(rb_eFiberError,
"dead fiber called");
 2633 if (!FIBER_TERMINATED_P(th->ec->fiber_ptr)) {
 2634 rb_exc_raise(value);
 2635 VM_UNREACHABLE(fiber_switch);
 2641 VM_ASSERT(FIBER_SUSPENDED_P(th->root_fiber));
 2643 cont = &th->root_fiber->cont;
 2645 cont->value = value;
 2647 fiber_setcontext(th->root_fiber, th->ec->fiber_ptr);
 2649 VM_UNREACHABLE(fiber_switch);
 2653 VM_ASSERT(FIBER_RUNNABLE_P(fiber));
 2655 rb_fiber_t *current_fiber = fiber_current();
 2657 VM_ASSERT(!current_fiber->resuming_fiber);
// Resume semantics: link target back to us via prev / resuming_fiber.
 2659 if (resuming_fiber) {
 2660 current_fiber->resuming_fiber = resuming_fiber;
 2661 fiber->prev = fiber_current();
 2662 fiber->yielding = 0;
// Yield semantics (elided else-branch): mark the outgoing fiber as yielding.
 2665 VM_ASSERT(!current_fiber->yielding);
 2667 current_fiber->yielding = 1;
// Blocking-fiber accounting on the way out (adjustment statements elided).
 2670 if (current_fiber->blocking) {
// Hand the arguments to the target fiber through its continuation slot.
 2675 cont->kw_splat = kw_splat;
 2676 cont->value = make_passing_arg(argc, argv);
// The actual context switch; execution resumes here when switched back.
 2678 fiber_store(fiber, th);
// Eagerly release a terminated resumee's stack, except on the pthread
// coroutine backend (see the #ifndef guard).
 2681#ifndef COROUTINE_PTHREAD_CONTEXT
 2682 if (resuming_fiber && FIBER_TERMINATED_P(fiber)) {
 2683 fiber_stack_release(fiber);
 2687 if (fiber_current()->blocking) {
// We may have been woken to service an interrupt — check now.
 2691 RUBY_VM_CHECK_INTS(th->ec);
 2695 current_fiber = th->ec->fiber_ptr;
 2696 value = current_fiber->cont.value;
 2698 fiber_check_killed(current_fiber);
// argc == -1 marks `value` as an exception to re-raise in this fiber.
 2700 if (current_fiber->cont.argc == -1) {
 2702 rb_exc_raise(value);
// Fragment of rb_fiber_transfer() (header elided): transfer control to the
// given fiber with no resume link (resuming_fiber = NULL) and no keywords.
 2711 return fiber_switch(fiber_ptr(fiber_value), argc, argv,
RB_NO_KEYWORDS, NULL,
false);
// Fiber#blocking? — true iff this fiber counts as blocking for the
// non-blocking fiber scheduler machinery.
 2729rb_fiber_blocking_p(
VALUE fiber)
 2731 return RBOOL(fiber_ptr(fiber)->blocking);
// rb_ensure body for Fiber.blocking: temporarily mark the current fiber as
// blocking, then yield to the given block (the yield call itself is elided
// in this listing).  `th` is volatile — presumably to survive the setjmp-
// style control flow in rb_ensure; confirm against upstream.
 2735fiber_blocking_yield(
VALUE fiber_value)
 2737 rb_fiber_t *fiber = fiber_ptr(fiber_value);
 2738 rb_thread_t *
volatile th = fiber->cont.saved_ec.thread_ptr;
// Only a non-blocking fiber may enter this path.
 2740 VM_ASSERT(fiber->blocking == 0);
 2743 fiber->blocking = 1;
// rb_ensure cleanup for Fiber.blocking: restore the fiber's non-blocking
// state even if the block raised (thread-level counter update elided).
 2752fiber_blocking_ensure(
VALUE fiber_value)
 2754 rb_fiber_t *fiber = fiber_ptr(fiber_value);
 2755 rb_thread_t *
volatile th = fiber->cont.saved_ec.thread_ptr;
 2758 fiber->blocking = 0;
// Fiber.blocking { ... } — run the block with the current fiber marked
// blocking, restoring the previous state afterwards via rb_ensure.
// NOTE(review): the line obtaining `fiber_value` (the current fiber) is
// elided in this listing; the `if (fiber->blocking)` short-circuit body is
// elided as well — presumably it yields directly when already blocking.
 2775rb_fiber_blocking(
VALUE class)
 2778 rb_fiber_t *fiber = fiber_ptr(fiber_value);
 2781 if (fiber->blocking) {
 2785 return rb_ensure(fiber_blocking_yield, fiber_value, fiber_blocking_ensure, fiber_value);
// Fiber.blocking? — reports the current *thread's* blocking counter
// (return statement elided in this listing).
 2808rb_fiber_s_blocking_p(
VALUE klass)
 2810 rb_thread_t *thread = GET_THREAD();
 2811 unsigned blocking = thread->blocking;
// Transition a fiber to TERMINATED (VM stack cleanup lines elided).
 2820rb_fiber_close(rb_fiber_t *fiber)
 2822 fiber_status_set(fiber, FIBER_TERMINATED);
// Finish a fiber whose block has returned (or raised): close it, detach its
// machine stack, and switch to the fiber chosen by return_fiber(true).
// If `error` is set, it is passed across the switch with argc == -1 so the
// receiving fiber re-raises it (see the argc == -1 check in fiber_switch);
// otherwise the fiber's final value is passed normally with argc == 1.
// This function does not return.
 2826rb_fiber_terminate(rb_fiber_t *fiber,
int need_interrupt,
VALUE error)
 2828 VALUE value = fiber->cont.value;
 2830 VM_ASSERT(FIBER_RESUMED_P(fiber));
 2831 rb_fiber_close(fiber);
// The coroutine stack is being abandoned; drop the machine-stack reference.
 2833 fiber->cont.machine.stack = NULL;
 2834 fiber->cont.machine.stack_size = 0;
 2836 rb_fiber_t *next_fiber = return_fiber(
true);
// Propagate a pending interrupt to whichever fiber runs next.
 2838 if (need_interrupt) RUBY_VM_SET_INTERRUPT(&next_fiber->cont.saved_ec);
 2841 fiber_switch(next_fiber, -1, &error,
RB_NO_KEYWORDS, NULL,
false);
 2843 fiber_switch(next_fiber, 1, &value,
RB_NO_KEYWORDS, NULL,
false);
2848fiber_resume_kw(rb_fiber_t *fiber,
int argc,
const VALUE *argv,
int kw_splat)
2850 rb_fiber_t *current_fiber = fiber_current();
2852 if (argc == -1 && FIBER_CREATED_P(fiber)) {
2853 rb_raise(rb_eFiberError,
"cannot raise exception on unborn fiber");
2855 else if (FIBER_TERMINATED_P(fiber)) {
2856 rb_raise(rb_eFiberError,
"attempt to resume a terminated fiber");
2858 else if (fiber == current_fiber) {
2859 rb_raise(rb_eFiberError,
"attempt to resume the current fiber");
2861 else if (fiber->prev != NULL) {
2862 rb_raise(rb_eFiberError,
"attempt to resume a resumed fiber (double resume)");
2864 else if (fiber->resuming_fiber) {
2865 rb_raise(rb_eFiberError,
"attempt to resume a resuming fiber");
2867 else if (fiber->prev == NULL &&
2868 (!fiber->yielding && fiber->status != FIBER_CREATED)) {
2869 rb_raise(rb_eFiberError,
"attempt to resume a transferring fiber");
2872 return fiber_switch(fiber, argc, argv, kw_splat, fiber,
false);
// Public keyword-aware resume entry point: unwrap the Fiber object and
// delegate to fiber_resume_kw.
 2876rb_fiber_resume_kw(
VALUE self,
int argc,
const VALUE *argv,
int kw_splat)
 2878 return fiber_resume_kw(fiber_ptr(self), argc, argv, kw_splat);
// Fragment of rb_fiber_resume() (header elided): resume without keywords.
 2884 return fiber_resume_kw(fiber_ptr(self), argc, argv,
RB_NO_KEYWORDS);
// Keyword-aware Fiber.yield: switch back to the fiber that resumed us
// (return_fiber(false)), marking the switch as yielding.
 2888rb_fiber_yield_kw(
int argc,
const VALUE *argv,
int kw_splat)
 2890 return fiber_switch(return_fiber(
false), argc, argv, kw_splat, NULL,
true);
// Fiber.yield without keywords — same as rb_fiber_yield_kw with
// RB_NO_KEYWORDS.
 2894rb_fiber_yield(
int argc,
const VALUE *argv)
 2896 return fiber_switch(return_fiber(
false), argc, argv,
RB_NO_KEYWORDS, NULL,
true);
// Restore the thread's EC-local storage from the root fiber's saved context
// when the current fiber is not the root fiber.
 2900rb_fiber_reset_root_local_storage(rb_thread_t *th)
 2902 if (th->root_fiber && th->root_fiber != th->ec->fiber_ptr) {
 2903 th->ec->local_storage = th->root_fiber->cont.saved_ec.local_storage;
// Fragment of Fiber#alive? (header elided): alive == not terminated.
 2918 return RBOOL(!FIBER_TERMINATED_P(fiber_ptr(fiber_value)));
// Fiber#resume method entry point (body elided in this listing; presumably
// delegates to rb_fiber_resume_kw with rb_keyword_given_p()).
 2937rb_fiber_m_resume(
int argc,
VALUE *argv,
VALUE fiber)
// Fiber#backtrace — delegate to the VM backtrace routine against this
// fiber's saved execution context.
 2989rb_fiber_backtrace(
int argc,
VALUE *argv,
VALUE fiber)
 2991 return rb_vm_backtrace(argc, argv, &fiber_ptr(fiber)->cont.saved_ec);
// Fiber#backtrace_locations — like rb_fiber_backtrace but returns
// Thread::Backtrace::Location objects.
 3014rb_fiber_backtrace_locations(
int argc,
VALUE *argv,
VALUE fiber)
 3016 return rb_vm_backtrace_locations(argc, argv, &fiber_ptr(fiber)->cont.saved_ec);
// Fiber#transfer method entry point (body elided in this listing; presumably
// delegates to rb_fiber_transfer_kw with rb_keyword_given_p()).
 3102rb_fiber_m_transfer(
int argc,
VALUE *argv,
VALUE self)
// Transfer control to `fiber` after rejecting the two illegal targets:
// a fiber currently inside a resume, and a fiber suspended by yield.
// The switch carries no resume link (resuming_fiber = NULL).
 3108fiber_transfer_kw(rb_fiber_t *fiber,
int argc,
const VALUE *argv,
int kw_splat)
 3110 if (fiber->resuming_fiber) {
 3111 rb_raise(rb_eFiberError,
"attempt to transfer to a resuming fiber");
 3114 if (fiber->yielding) {
 3115 rb_raise(rb_eFiberError,
"attempt to transfer to a yielding fiber");
 3118 return fiber_switch(fiber, argc, argv, kw_splat, NULL,
false);
// Public keyword-aware transfer entry point: unwrap the Fiber object and
// delegate to fiber_transfer_kw.
 3122rb_fiber_transfer_kw(
VALUE self,
int argc,
const VALUE *argv,
int kw_splat)
 3124 return fiber_transfer_kw(fiber_ptr(self), argc, argv, kw_splat);
// Fiber.yield singleton method entry point (body elided in this listing;
// presumably delegates to rb_fiber_yield_kw with rb_keyword_given_p()).
 3138rb_fiber_s_yield(
int argc,
VALUE *argv,
VALUE klass)
// Deliver `exception` into `fiber`: raise directly if it is the current
// fiber; if the fiber is mid-resume, forward to the fiber it is resuming;
// if it is suspended by transfer, handle via the (elided) branch below.
 3144fiber_raise(rb_fiber_t *fiber,
VALUE exception)
 3146 if (fiber == fiber_current()) {
 3147 rb_exc_raise(exception);
 3149 else if (fiber->resuming_fiber) {
 3150 return fiber_raise(fiber->resuming_fiber, exception);
 3152 else if (FIBER_SUSPENDED_P(fiber) && !fiber->yielding) {
// Fragment of rb_fiber_raise() (header elided): build the exception from
// the argv triple as Kernel#raise would, then deliver it via fiber_raise.
 3163 VALUE exception = rb_make_exception(argc, argv);
 3165 return fiber_raise(fiber_ptr(fiber), exception);
// Fiber#raise method entry point — delegates to rb_fiber_raise.
 3194rb_fiber_m_raise(
int argc,
VALUE *argv,
VALUE self)
 3196 return rb_fiber_raise(self, argc, argv);
// Fiber#kill — returns false if already killed.  A never-started fiber is
// terminated directly; a live fiber is either unwound immediately (when it
// is the current fiber) or sent a kill signal via fiber_raise(Qnil).
// NOTE(review): the line that sets fiber->killed is elided in this listing.
 3217rb_fiber_m_kill(
VALUE self)
 3219 rb_fiber_t *fiber = fiber_ptr(self);
 3221 if (fiber->killed)
return Qfalse;
 3224 if (fiber->status == FIBER_CREATED) {
 3225 fiber->status = FIBER_TERMINATED;
 3227 else if (fiber->status != FIBER_TERMINATED) {
// Killing ourselves: unwind right here via TAG_FATAL.
 3228 if (fiber_current() == fiber) {
 3229 fiber_check_killed(fiber);
// Killing another fiber: switch to it with a nil "exception" so it unwinds.
 3232 fiber_raise(fiber_ptr(self),
Qnil);
// Fiber.current singleton method (body elided; presumably returns
// rb_fiber_current()).
 3247rb_fiber_s_current(
VALUE klass)
// Fiber#to_s / #inspect — render the fiber via rb_block_to_s with a status
// suffix such as " (suspended)" or " (resumed by resuming)" built into a
// fixed 32-byte buffer.
 3253fiber_to_s(
VALUE fiber_value)
 3255 const rb_fiber_t *fiber = fiber_ptr(fiber_value);
 3257 char status_info[0x20];
 3259 if (fiber->resuming_fiber) {
 3260 snprintf(status_info, 0x20,
" (%s by resuming)", fiber_status_name(fiber->status));
 3263 snprintf(status_info, 0x20,
" (%s)", fiber_status_name(fiber->status));
// No proc to describe (elided branch): close the inspect string with ">".
 3268 strlcat(status_info,
">",
sizeof(status_info));
 3273 GetProcPtr(fiber->first_proc, proc);
 3274 return rb_block_to_s(fiber_value, &proc->block, status_info);
 3277#ifdef HAVE_WORKING_FORK
// After fork(), only the forking fiber survives: if the thread was running
// on a non-root fiber, that fiber becomes the new root, and any stale
// prev link is cleared.
 3279rb_fiber_atfork(rb_thread_t *th)
 3281 if (th->root_fiber) {
 3282 if (&th->root_fiber->cont.saved_ec != th->ec) {
 3283 th->root_fiber = th->ec->fiber_ptr;
 3285 th->root_fiber->prev = 0;
 3290#ifdef RB_EXPERIMENTAL_FIBER_POOL
// GC free function for the experimental FiberPool wrapper: release all of
// the pool's stack allocations (the DATA_PTR unwrap line is elided).
 3292fiber_pool_free(
void *ptr)
 3295 RUBY_FREE_ENTER(
"fiber_pool");
 3297 fiber_pool_allocation_free(
fiber_pool->allocations);
 3300 RUBY_FREE_LEAVE(
"fiber_pool");
// GC memsize function for FiberPool (accumulation over allocations elided),
// followed by a fragment of the FiberPool rb_data_type_t definition that
// wires in fiber_pool_free / fiber_pool_memsize above.
 3304fiber_pool_memsize(
const void *ptr)
 3307 size_t size =
sizeof(*fiber_pool);
 3316 {NULL, fiber_pool_free, fiber_pool_memsize,},
 3317 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
// Allocator for the FiberPool class (body elided; presumably a
// TypedData_Make_Struct over the data type above).
 3321fiber_pool_alloc(
VALUE klass)
// FiberPool#initialize(size = nil, count = nil, vm_stack_size = nil):
// up to three optional args; nil size and vm_stack_size fall back to the
// VM's default fiber machine/VM stack sizes (the count default and the
// actual fiber_pool_initialize call are elided in this listing).
 3329rb_fiber_pool_initialize(
int argc,
VALUE* argv,
VALUE self)
 3331 rb_thread_t *th = GET_THREAD();
 3336 rb_scan_args(argc, argv,
"03", &size, &count, &vm_stack_size);
 3339 size =
SIZET2NUM(th->vm->default_params.fiber_machine_stack_size);
 3346 if (
NIL_P(vm_stack_size)) {
 3347 vm_stack_size =
SIZET2NUM(th->vm->default_params.fiber_vm_stack_size);
// Fragment of Init_Cont() (header elided): determine the page size
// (Win32 GetSystemInfo vs. POSIX sysconf), initialize the shared fiber
// pool with the VM's default stack sizes, honor the
// RUBY_SHARED_FIBER_POOL_FREE_STACKS environment override with range
// checks, and register the Fiber (and, experimentally, Fiber::Pool)
// methods.
 3374 rb_thread_t *th = GET_THREAD();
 3375 size_t vm_stack_size = th->vm->default_params.fiber_vm_stack_size;
 3376 size_t machine_stack_size = th->vm->default_params.fiber_machine_stack_size;
 3377 size_t stack_size = machine_stack_size + vm_stack_size;
 3381 GetSystemInfo(&info);
 3382 pagesize = info.dwPageSize;
 3384 pagesize = sysconf(_SC_PAGESIZE);
 3386 SET_MACHINE_STACK_END(&th->ec->machine.stack_end);
 3388 fiber_pool_initialize(&shared_fiber_pool, stack_size, FIBER_POOL_INITIAL_SIZE, vm_stack_size);
// Optional tuning knob: 0 disables returning stacks to the OS, 1 enables
// it; anything else is rejected or warned about below.
 3394 const char *fiber_shared_fiber_pool_free_stacks = getenv(
"RUBY_SHARED_FIBER_POOL_FREE_STACKS");
 3395 if (fiber_shared_fiber_pool_free_stacks) {
 3396 shared_fiber_pool.free_stacks = atoi(fiber_shared_fiber_pool_free_stacks);
 3398 if (shared_fiber_pool.free_stacks < 0) {
 3399 rb_warn(
"Setting RUBY_SHARED_FIBER_POOL_FREE_STACKS to a negative value is not allowed.");
 3400 shared_fiber_pool.free_stacks = 0;
 3403 if (shared_fiber_pool.free_stacks > 1) {
 3404 rb_warn(
"Setting RUBY_SHARED_FIBER_POOL_FREE_STACKS to a value greater than 1 is operating system specific, and may cause crashes.");
 3425 rb_define_method(rb_cFiber,
"backtrace_locations", rb_fiber_backtrace_locations, -1);
 3438#ifdef RB_EXPERIMENTAL_FIBER_POOL
 3445 rb_define_method(rb_cFiberPool,
"initialize", rb_fiber_pool_initialize, -1);
// Exported entry point used by the `continuation` extension library to
// initialize Continuation support (body elided in this listing).
RUBY_SYMBOL_EXPORT_BEGIN
 3454ruby_Init_Continuation_body(
void)
RUBY_SYMBOL_EXPORT_END
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
#define rb_define_global_function(mid, func, arity)
Defines rb_mKernel #mid.
#define RUBY_EVENT_FIBER_SWITCH
Encountered a Fiber#yield.
static bool RB_OBJ_FROZEN(VALUE obj)
Checks if an object is frozen.
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
VALUE rb_define_class_under(VALUE outer, const char *name, VALUE super)
Defines a class under the namespace of outer.
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
void rb_undef_method(VALUE klass, const char *name)
Defines an undef of a method.
int rb_scan_args_kw(int kw_flag, int argc, const VALUE *argv, const char *fmt,...)
Identical to rb_scan_args(), except it also accepts kw_splat.
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
Retrieves argument from argc and argv to given VALUE references according to the format string.
int rb_keyword_given_p(void)
Determines if the current method is given a keyword argument.
int rb_get_kwargs(VALUE keyword_hash, const ID *table, int required, int optional, VALUE *values)
Keyword argument deconstructor.
#define REALLOC_N
Old name of RB_REALLOC_N.
#define xfree
Old name of ruby_xfree.
#define Qundef
Old name of RUBY_Qundef.
#define UNREACHABLE_RETURN
Old name of RBIMPL_UNREACHABLE_RETURN.
#define ZALLOC
Old name of RB_ZALLOC.
#define CLASS_OF
Old name of rb_class_of.
#define rb_ary_new4
Old name of rb_ary_new_from_values.
#define SIZET2NUM
Old name of RB_SIZE2NUM.
#define rb_exc_new2
Old name of rb_exc_new_cstr.
#define T_HASH
Old name of RUBY_T_HASH.
#define ALLOC_N
Old name of RB_ALLOC_N.
#define Qtrue
Old name of RUBY_Qtrue.
#define INT2NUM
Old name of RB_INT2NUM.
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define NIL_P
Old name of RB_NIL_P.
#define T_SYMBOL
Old name of RUBY_T_SYMBOL.
#define NUM2SIZET
Old name of RB_NUM2SIZE.
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
void rb_category_warn(rb_warning_category_t category, const char *fmt,...)
Identical to rb_category_warning(), except it reports unless $VERBOSE is nil.
int rb_typeddata_is_kind_of(VALUE obj, const rb_data_type_t *data_type)
Checks if the given object is of given kind.
void rb_syserr_fail(int e, const char *mesg)
Raises appropriate exception that represents a C errno.
VALUE rb_eStandardError
StandardError exception.
VALUE rb_eFrozenError
FrozenError exception.
VALUE rb_eTypeError
TypeError exception.
VALUE rb_eRuntimeError
RuntimeError exception.
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports unless $VERBOSE is nil.
@ RB_WARN_CATEGORY_EXPERIMENTAL
Warning is for experimental features.
VALUE rb_any_to_s(VALUE obj)
Generates a textual representation of the given object.
VALUE rb_obj_dup(VALUE obj)
Duplicates the given object.
void rb_memerror(void)
Triggers out-of-memory error.
VALUE rb_fiber_current(void)
Queries the fiber which is calling this function.
VALUE rb_hash_new(void)
Creates a new, empty hash object.
void rb_provide(const char *feature)
Declares that the given feature is already provided by someone else.
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
VALUE rb_obj_is_proc(VALUE recv)
Queries if the given object is a proc.
void rb_str_set_len(VALUE str, long len)
Overwrites the length of the string.
#define rb_str_cat_cstr(buf, str)
Identical to rb_str_cat(), except it assumes the passed pointer is a pointer to a C string.
void rb_undef_alloc_func(VALUE klass)
Deletes the allocator function of a class.
void rb_define_alloc_func(VALUE klass, rb_alloc_func_t func)
Sets the allocator function of a class.
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
VALUE rb_to_symbol(VALUE name)
Identical to rb_intern_str(), except it generates a dynamic symbol if necessary.
VALUE rb_yield(VALUE val)
Yields the block.
rb_block_call_func * rb_block_call_func_t
Shorthand type that represents an iterator-written-in-C function pointer.
#define MEMCPY(p1, p2, type, n)
Handy macro to call memcpy.
#define ALLOCA_N(type, n)
#define RB_ALLOC(type)
Shorthand of RB_ALLOC_N with n=1.
VALUE rb_proc_new(type *q, VALUE w)
Creates a rb_cProc instance.
void rb_hash_foreach(VALUE q, int_type *w, VALUE e)
Iteration over the given hash.
VALUE rb_ensure(type *q, VALUE w, type *e, VALUE r)
An equivalent of ensure clause.
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
#define DATA_PTR(obj)
Convenient getter macro.
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
#define TypedData_Wrap_Struct(klass, data_type, sval)
Converts sval, a pointer to your struct, into a Ruby object.
struct rb_data_type_struct rb_data_type_t
This is the struct that holds necessary info for a struct.
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of taking an existing pointer.
#define errno
Ractor-aware version of errno.
#define RB_NO_KEYWORDS
Do not pass keywords.
VALUE rb_fiber_scheduler_current(void)
Identical to rb_fiber_scheduler_get(), except it also returns RUBY_Qnil in case of a blocking fiber.
VALUE rb_fiber_scheduler_set(VALUE scheduler)
Destructively assigns the passed scheduler to that of the current thread that is calling this functio...
VALUE rb_fiber_scheduler_get(void)
Queries the current scheduler of the current thread that is calling this function.
VALUE rb_fiber_scheduler_fiber(VALUE scheduler, int argc, VALUE *argv, int kw_splat)
Create and schedule a non-blocking fiber.
#define RTEST
This is an old name of RB_TEST.
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_lock.
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_unlock.
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_destroy.
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
uintptr_t VALUE
Type that represents a Ruby object.
static void Check_Type(VALUE v, enum ruby_value_type t)
Identical to RB_TYPE_P(), except it raises exceptions on predication failure.
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.