24#include "eval_intern.h"
26#include "internal/bits.h"
27#include "internal/class.h"
28#include "internal/gc.h"
29#include "internal/hash.h"
30#include "internal/symbol.h"
31#include "internal/thread.h"
42static VALUE sym_default;
47 rb_event_hook_flag_t hook_flags;
55 unsigned int target_line;
61#define MAX_EVENT_NUM 32
64rb_hook_list_mark(rb_hook_list_t *hooks)
66 rb_event_hook_t *hook = hooks->hooks;
69 rb_gc_mark(hook->data);
75rb_hook_list_mark_and_update(rb_hook_list_t *hooks)
77 rb_event_hook_t *hook = hooks->hooks;
80 rb_gc_mark_and_move(&hook->data);
85static void clean_hooks(rb_hook_list_t *list);
88rb_hook_list_free(rb_hook_list_t *hooks)
90 hooks->need_clean =
true;
92 if (hooks->running == 0) {
99void rb_clear_attr_ccs(
void);
100void rb_clear_bf_ccs(
void);
106 rb_event_flag_t enabled_iseq_events = ruby_vm_event_enabled_global_flags & ISEQ_TRACE_EVENTS;
107 bool first_time_iseq_events_p = new_iseq_events & ~enabled_iseq_events;
114 if (first_time_iseq_events_p) {
116 rb_iseq_trace_set_all(new_iseq_events | enabled_iseq_events);
119 else if (enable_c_call || enable_c_return) {
122 else if (enable_call || enable_return) {
126 ruby_vm_event_flags = new_events;
127 ruby_vm_event_enabled_global_flags |= new_events;
128 rb_objspace_set_event_hook(new_events);
131 if (first_time_iseq_events_p || enable_c_call || enable_c_return) {
138 rb_yjit_tracing_invalidate_all();
139 rb_rjit_tracing_invalidate_all(new_iseq_events);
145static rb_event_hook_t *
148 rb_event_hook_t *hook;
151 rb_raise(
rb_eTypeError,
"Can not specify normal event and internal event simultaneously.");
154 hook =
ALLOC(rb_event_hook_t);
155 hook->hook_flags = hook_flags;
156 hook->events = events;
161 hook->filter.th = NULL;
162 hook->filter.target_line = 0;
168hook_list_connect(
VALUE list_owner, rb_hook_list_t *list, rb_event_hook_t *hook,
int global_p)
171 hook->next = list->hooks;
173 list->events |= hook->events;
177 update_global_event_hook(prev_events, list->events);
185connect_event_hook(
const rb_execution_context_t *ec, rb_event_hook_t *hook)
187 rb_hook_list_t *list = rb_ec_ractor_hooks(ec);
188 hook_list_connect(
Qundef, list, hook, TRUE);
192rb_threadptr_add_event_hook(
const rb_execution_context_t *ec, rb_thread_t *th,
195 rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
196 hook->filter.th = th;
197 connect_event_hook(ec, hook);
203 rb_threadptr_add_event_hook(GET_EC(), rb_thread_ptr(thval), func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
209 rb_add_event_hook2(func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
215 rb_threadptr_add_event_hook(GET_EC(), rb_thread_ptr(thval), func, events, data, hook_flags);
221 rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
222 connect_event_hook(GET_EC(), hook);
226clean_hooks(rb_hook_list_t *list)
228 rb_event_hook_t *hook, **nextp = &list->hooks;
231 VM_ASSERT(list->running == 0);
232 VM_ASSERT(list->need_clean ==
true);
235 list->need_clean =
false;
237 while ((hook = *nextp) != 0) {
238 if (hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) {
243 list->events |= hook->events;
248 if (list->is_local) {
249 if (list->events == 0) {
255 update_global_event_hook(prev_events, list->events);
260clean_hooks_check(rb_hook_list_t *list)
262 if (UNLIKELY(list->need_clean)) {
263 if (list->running == 0) {
269#define MATCH_ANY_FILTER_TH ((rb_thread_t *)1)
275 rb_hook_list_t *list = rb_ec_ractor_hooks(ec);
277 rb_event_hook_t *hook = list->hooks;
280 if (func == 0 || hook->func == func) {
281 if (hook->filter.th == filter_th || filter_th == MATCH_ANY_FILTER_TH) {
282 if (UNDEF_P(data) || hook->data == data) {
283 hook->hook_flags |= RUBY_EVENT_HOOK_FLAG_DELETED;
285 list->need_clean =
true;
292 clean_hooks_check(list);
297rb_threadptr_remove_event_hook(
const rb_execution_context_t *ec,
const rb_thread_t *filter_th,
rb_event_hook_func_t func,
VALUE data)
299 return remove_event_hook(ec, filter_th, func, data);
305 return rb_threadptr_remove_event_hook(GET_EC(), rb_thread_ptr(thval), func,
Qundef);
311 return rb_threadptr_remove_event_hook(GET_EC(), rb_thread_ptr(thval), func, data);
317 return remove_event_hook(GET_EC(), NULL, func,
Qundef);
323 return remove_event_hook(GET_EC(), NULL, func, data);
327rb_ec_clear_current_thread_trace_func(
const rb_execution_context_t *ec)
329 rb_threadptr_remove_event_hook(ec, rb_ec_thread_ptr(ec), 0,
Qundef);
333rb_ec_clear_all_trace_func(
const rb_execution_context_t *ec)
335 rb_threadptr_remove_event_hook(ec, MATCH_ANY_FILTER_TH, 0,
Qundef);
341exec_hooks_body(
const rb_execution_context_t *ec, rb_hook_list_t *list,
const rb_trace_arg_t *trace_arg)
343 rb_event_hook_t *hook;
345 for (hook = list->hooks; hook; hook = hook->next) {
346 if (!(hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) &&
347 (trace_arg->event & hook->events) &&
348 (LIKELY(hook->filter.th == 0) || hook->filter.th == rb_ec_thread_ptr(ec)) &&
349 (LIKELY(hook->filter.target_line == 0) || (hook->filter.target_line == (
unsigned int)rb_vm_get_sourceline(ec->cfp)))) {
350 if (!(hook->hook_flags & RUBY_EVENT_HOOK_FLAG_RAW_ARG)) {
351 (*hook->func)(trace_arg->event, hook->data, trace_arg->self, trace_arg->id, trace_arg->klass);
354 (*((rb_event_hook_raw_arg_func_t)hook->func))(hook->data, trace_arg);
361exec_hooks_precheck(
const rb_execution_context_t *ec, rb_hook_list_t *list,
const rb_trace_arg_t *trace_arg)
363 if (list->events & trace_arg->event) {
373exec_hooks_postcheck(
const rb_execution_context_t *ec, rb_hook_list_t *list)
376 clean_hooks_check(list);
380exec_hooks_unprotected(
const rb_execution_context_t *ec, rb_hook_list_t *list,
const rb_trace_arg_t *trace_arg)
382 if (exec_hooks_precheck(ec, list, trace_arg) == 0)
return;
383 exec_hooks_body(ec, list, trace_arg);
384 exec_hooks_postcheck(ec, list);
388exec_hooks_protected(rb_execution_context_t *ec, rb_hook_list_t *list,
const rb_trace_arg_t *trace_arg)
390 enum ruby_tag_type state;
393 if (exec_hooks_precheck(ec, list, trace_arg) == 0)
return 0;
395 raised = rb_ec_reset_raised(ec);
400 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
401 exec_hooks_body(ec, list, trace_arg);
405 exec_hooks_postcheck(ec, list);
408 rb_ec_set_raised(ec);
416rb_exec_event_hooks(
rb_trace_arg_t *trace_arg, rb_hook_list_t *hooks,
int pop_p)
418 rb_execution_context_t *ec = trace_arg->ec;
427 ec->trace_arg = trace_arg;
429 exec_hooks_unprotected(ec, rb_ec_ractor_hooks(ec), trace_arg);
430 ec->trace_arg = prev_trace_arg;
434 if (ec->trace_arg == NULL &&
435 trace_arg->self != rb_mRubyVMFrozenCore ) {
436 const VALUE errinfo = ec->errinfo;
437 const VALUE old_recursive = ec->local_storage_recursive_hash;
438 enum ruby_tag_type state = 0;
441 ec->local_storage_recursive_hash = ec->local_storage_recursive_hash_for_trace;
443 ec->trace_arg = trace_arg;
446 if ((state = exec_hooks_protected(ec, hooks, trace_arg)) == TAG_NONE) {
447 ec->errinfo = errinfo;
451 ec->trace_arg = NULL;
452 ec->local_storage_recursive_hash_for_trace = ec->local_storage_recursive_hash;
453 ec->local_storage_recursive_hash = old_recursive;
457 if (VM_FRAME_FINISHED_P(ec->cfp)) {
458 ec->tag = ec->tag->prev;
462 EC_JUMP_TAG(ec, state);
473 rb_execution_context_t *
const ec = GET_EC();
474 rb_vm_t *
const vm = rb_ec_vm_ptr(ec);
475 enum ruby_tag_type state;
477 dummy_trace_arg.event = 0;
479 if (!ec->trace_arg) {
480 ec->trace_arg = &dummy_trace_arg;
483 raised = rb_ec_reset_raised(ec);
486 if (LIKELY((state = EC_EXEC_TAG()) == TAG_NONE)) {
487 result = (*func)(arg);
495 rb_ec_reset_raised(ec);
498 if (ec->trace_arg == &dummy_trace_arg) {
499 ec->trace_arg = NULL;
503#if defined RUBY_USE_SETJMPEX && RUBY_USE_SETJMPEX
506 EC_JUMP_TAG(ec, state);
596thread_add_trace_func(rb_execution_context_t *ec, rb_thread_t *filter_th,
VALUE trace)
602 rb_threadptr_add_event_hook(ec, filter_th, call_trace_func,
RUBY_EVENT_ALL, trace, RUBY_EVENT_HOOK_FLAG_SAFE);
615thread_add_trace_func_m(
VALUE obj,
VALUE trace)
617 thread_add_trace_func(GET_EC(), rb_thread_ptr(obj), trace);
633thread_set_trace_func_m(
VALUE target_thread,
VALUE trace)
635 rb_execution_context_t *ec = GET_EC();
636 rb_thread_t *target_th = rb_thread_ptr(target_thread);
638 rb_threadptr_remove_event_hook(ec, target_th, call_trace_func,
Qundef);
644 thread_add_trace_func(ec, target_th, trace);
672#define C(name, NAME) case RUBY_EVENT_##NAME: CONST_ID(id, #name); return id;
679 C(c_return, C_RETURN);
682 C(b_return, B_RETURN);
683 C(thread_begin, THREAD_BEGIN);
684 C(thread_end, THREAD_END);
685 C(fiber_switch, FIBER_SWITCH);
686 C(script_compiled, SCRIPT_COMPILED);
695get_path_and_lineno(
const rb_execution_context_t *ec,
const rb_control_frame_t *cfp,
rb_event_flag_t event,
VALUE *pathp,
int *linep)
697 cfp = rb_vm_get_ruby_level_next_cfp(ec, cfp);
700 const rb_iseq_t *iseq = cfp->iseq;
701 *pathp = rb_iseq_path(iseq);
706 *linep =
FIX2INT(rb_iseq_first_lineno(iseq));
709 *linep = rb_vm_get_sourceline(cfp);
725 const rb_execution_context_t *ec = GET_EC();
727 get_path_and_lineno(ec, ec->cfp, event, &filename, &line);
730 rb_ec_frame_method_id_and_class(ec, &
id, 0, &klass);
735 klass =
RBASIC(klass)->klass;
737 else if (RCLASS_SINGLETON_P(klass)) {
738 klass = RCLASS_ATTACHED_OBJECT(klass);
747 if (self && (filename !=
Qnil) &&
750 (VM_FRAME_RUBYFRAME_P(ec->cfp) && imemo_type_p((
VALUE)ec->cfp->iseq, imemo_iseq))) {
753 argv[5] = klass ? klass :
Qnil;
760static VALUE rb_cTracePoint;
765 rb_thread_t *target_th;
766 VALUE local_target_set;
770 void (*func)(
VALUE tpval,
void *data);
781 rb_gc_mark(tp->proc);
782 rb_gc_mark(tp->local_target_set);
783 if (tp->target_th) rb_gc_mark(tp->target_th->self);
793 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
804symbol2event_flag(
VALUE v)
807 VALUE sym = rb_to_symbol_type(v);
813#define C(name, NAME) CONST_ID(id, #name); if (sym == ID2SYM(id)) return RUBY_EVENT_##NAME
820 C(c_return, C_RETURN);
823 C(b_return, B_RETURN);
824 C(thread_begin, THREAD_BEGIN);
825 C(thread_end, THREAD_END);
826 C(fiber_switch, FIBER_SWITCH);
827 C(script_compiled, SCRIPT_COMPILED);
832 C(a_return, A_RETURN);
834 rb_raise(rb_eArgError,
"unknown event: %"PRIsVALUE,
rb_sym2str(sym));
849 if (trace_arg == 0) {
858 return get_trace_arg();
864 return trace_arg->event;
870 return ID2SYM(get_event_id(trace_arg->event));
876 if (UNDEF_P(trace_arg->path)) {
877 get_path_and_lineno(trace_arg->ec, trace_arg->cfp, trace_arg->event, &trace_arg->path, &trace_arg->lineno);
884 fill_path_and_lineno(trace_arg);
885 return INT2FIX(trace_arg->lineno);
890 fill_path_and_lineno(trace_arg);
891 return trace_arg->path;
897 if (!trace_arg->klass_solved) {
898 if (!trace_arg->klass) {
899 rb_vm_control_frame_id_and_class(trace_arg->cfp, &trace_arg->id, &trace_arg->called_id, &trace_arg->klass);
902 if (trace_arg->klass) {
904 trace_arg->klass =
RBASIC(trace_arg->klass)->klass;
908 trace_arg->klass =
Qnil;
911 trace_arg->klass_solved = 1;
918 switch (trace_arg->event) {
923 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(trace_arg->ec, trace_arg->cfp);
926 if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_BLOCK && !VM_FRAME_LAMBDA_P(cfp)) {
929 return rb_iseq_parameters(cfp->iseq, is_proc);
935 fill_id_and_klass(trace_arg);
936 if (trace_arg->klass && trace_arg->id) {
937 const rb_method_entry_t *me;
939 me = rb_method_entry_without_refinements(trace_arg->klass, trace_arg->called_id, &iclass);
941 me = rb_method_entry_without_refinements(trace_arg->klass, trace_arg->id, &iclass);
943 return rb_unnamed_parameters(rb_method_entry_arity(me));
962 fill_id_and_klass(trace_arg);
963 return trace_arg->id ?
ID2SYM(trace_arg->id) :
Qnil;
969 fill_id_and_klass(trace_arg);
970 return trace_arg->called_id ?
ID2SYM(trace_arg->called_id) :
Qnil;
976 fill_id_and_klass(trace_arg);
977 return trace_arg->klass;
983 rb_control_frame_t *cfp;
984 switch (trace_arg->event) {
989 cfp = rb_vm_get_binding_creatable_next_cfp(trace_arg->ec, trace_arg->cfp);
991 if (cfp && imemo_type_p((
VALUE)cfp->iseq, imemo_iseq)) {
992 return rb_vm_make_binding(trace_arg->ec, cfp);
1002 return trace_arg->self;
1014 if (UNDEF_P(trace_arg->data)) {
1015 rb_bug(
"rb_tracearg_return_value: unreachable");
1017 return trace_arg->data;
1029 if (UNDEF_P(trace_arg->data)) {
1030 rb_bug(
"rb_tracearg_raised_exception: unreachable");
1032 return trace_arg->data;
1038 VALUE data = trace_arg->data;
1046 if (UNDEF_P(data)) {
1047 rb_bug(
"rb_tracearg_raised_exception: unreachable");
1049 if (rb_obj_is_iseq(data)) {
1062 VALUE data = trace_arg->data;
1070 if (UNDEF_P(data)) {
1071 rb_bug(
"rb_tracearg_raised_exception: unreachable");
1074 if (rb_obj_is_iseq(data)) {
1075 return rb_iseqw_new((
const rb_iseq_t *)data);
1082 return rb_iseqw_new((
const rb_iseq_t *)
RARRAY_AREF(data, 1));
1095 if (UNDEF_P(trace_arg->data)) {
1096 rb_bug(
"rb_tracearg_object: unreachable");
1098 return trace_arg->data;
1102tracepoint_attr_event(rb_execution_context_t *ec,
VALUE tpval)
1108tracepoint_attr_lineno(rb_execution_context_t *ec,
VALUE tpval)
1113tracepoint_attr_path(rb_execution_context_t *ec,
VALUE tpval)
1119tracepoint_attr_parameters(rb_execution_context_t *ec,
VALUE tpval)
1121 return rb_tracearg_parameters(get_trace_arg());
1125tracepoint_attr_method_id(rb_execution_context_t *ec,
VALUE tpval)
1131tracepoint_attr_callee_id(rb_execution_context_t *ec,
VALUE tpval)
1137tracepoint_attr_defined_class(rb_execution_context_t *ec,
VALUE tpval)
1143tracepoint_attr_binding(rb_execution_context_t *ec,
VALUE tpval)
1149tracepoint_attr_self(rb_execution_context_t *ec,
VALUE tpval)
1155tracepoint_attr_return_value(rb_execution_context_t *ec,
VALUE tpval)
1161tracepoint_attr_raised_exception(rb_execution_context_t *ec,
VALUE tpval)
1167tracepoint_attr_eval_script(rb_execution_context_t *ec,
VALUE tpval)
1169 return rb_tracearg_eval_script(get_trace_arg());
1173tracepoint_attr_instruction_sequence(rb_execution_context_t *ec,
VALUE tpval)
1175 return rb_tracearg_instruction_sequence(get_trace_arg());
1181 rb_tp_t *tp = tpptr(tpval);
1184 (*tp->func)(tpval, tp->data);
1187 if (tp->ractor == NULL || tp->ractor == GET_RACTOR()) {
1199 if (tp->local_target_set !=
Qfalse) {
1200 rb_raise(rb_eArgError,
"can't nest-enable a targeting TracePoint");
1207 if (tp->target_th) {
1208 rb_thread_add_event_hook2(tp->target_th->self, (
rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
1209 RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
1213 RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
1219static const rb_iseq_t *
1220iseq_of(
VALUE target)
1224 rb_raise(rb_eArgError,
"specified target is not supported");
1227 return rb_iseqw_to_iseq(iseqv);
1231const rb_method_definition_t *rb_method_def(
VALUE method);
1234rb_tracepoint_enable_for_target(
VALUE tpval,
VALUE target,
VALUE target_line)
1236 rb_tp_t *tp = tpptr(tpval);
1237 const rb_iseq_t *iseq = iseq_of(target);
1239 unsigned int line = 0;
1240 bool target_bmethod =
false;
1242 if (tp->tracing > 0) {
1243 rb_raise(rb_eArgError,
"can't nest-enable a targeting TracePoint");
1246 if (!
NIL_P(target_line)) {
1248 rb_raise(rb_eArgError,
"target_line is specified, but line event is not specified");
1255 VM_ASSERT(tp->local_target_set ==
Qfalse);
1260 rb_method_definition_t *def = (rb_method_definition_t *)rb_method_def(target);
1261 if (def->type == VM_METHOD_TYPE_BMETHOD &&
1263 if (def->body.bmethod.hooks == NULL) {
1264 def->body.bmethod.hooks =
ZALLOC(rb_hook_list_t);
1265 def->body.bmethod.hooks->is_local =
true;
1267 rb_hook_list_connect_tracepoint(target, def->body.bmethod.hooks, tpval, 0);
1268 rb_hash_aset(tp->local_target_set, target,
Qfalse);
1269 target_bmethod =
true;
1276 n += rb_iseq_add_local_tracepoint_recursively(iseq, tp->events, tpval, line, target_bmethod);
1277 rb_hash_aset(tp->local_target_set, (
VALUE)iseq,
Qtrue);
1280 iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) {
1285 rb_raise(rb_eArgError,
"can not enable any hooks");
1288 rb_yjit_tracing_invalidate_all();
1289 rb_rjit_tracing_invalidate_all(tp->events);
1291 ruby_vm_event_local_num++;
1302 rb_iseq_remove_local_tracepoint_recursively((rb_iseq_t *)target, tpval);
1306 rb_method_definition_t *def = (rb_method_definition_t *)rb_method_def(target);
1307 rb_hook_list_t *hooks = def->body.bmethod.hooks;
1308 VM_ASSERT(hooks != NULL);
1309 rb_hook_list_remove_tracepoint(hooks, tpval);
1311 if (hooks->events == 0) {
1312 rb_hook_list_free(def->body.bmethod.hooks);
1313 def->body.bmethod.hooks = NULL;
1326 if (tp->local_target_set) {
1327 rb_hash_foreach(tp->local_target_set, disable_local_event_iseq_i, tpval);
1329 ruby_vm_event_local_num--;
1332 if (tp->target_th) {
1340 tp->target_th = NULL;
1345rb_hook_list_connect_tracepoint(
VALUE target, rb_hook_list_t *list,
VALUE tpval,
unsigned int target_line)
1347 rb_tp_t *tp = tpptr(tpval);
1348 rb_event_hook_t *hook = alloc_event_hook((
rb_event_hook_func_t)tp_call_trace, tp->events & ISEQ_TRACE_EVENTS, tpval,
1349 RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
1350 hook->filter.target_line = target_line;
1351 hook_list_connect(target, list, hook, FALSE);
1355rb_hook_list_remove_tracepoint(rb_hook_list_t *list,
VALUE tpval)
1357 rb_event_hook_t *hook = list->hooks;
1361 if (hook->data == tpval) {
1362 hook->hook_flags |= RUBY_EVENT_HOOK_FLAG_DELETED;
1363 list->need_clean =
true;
1365 else if ((hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) == 0) {
1366 events |= hook->events;
1371 list->events = events;
1375tracepoint_enable_m(rb_execution_context_t *ec,
VALUE tpval,
VALUE target,
VALUE target_line,
VALUE target_thread)
1377 rb_tp_t *tp = tpptr(tpval);
1378 int previous_tracing = tp->tracing;
1380 if (target_thread == sym_default) {
1385 target_thread =
Qnil;
1390 if (
RTEST(target_thread)) {
1391 if (tp->target_th) {
1392 rb_raise(rb_eArgError,
"can not override target_thread filter");
1394 tp->target_th = rb_thread_ptr(target_thread);
1396 RUBY_ASSERT(tp->target_th->self == target_thread);
1400 tp->target_th = NULL;
1403 if (
NIL_P(target)) {
1404 if (!
NIL_P(target_line)) {
1405 rb_raise(rb_eArgError,
"only target_line is specified");
1410 rb_tracepoint_enable_for_target(tpval, target, target_line);
1419 return RBOOL(previous_tracing);
1424tracepoint_disable_m(rb_execution_context_t *ec,
VALUE tpval)
1426 rb_tp_t *tp = tpptr(tpval);
1427 int previous_tracing = tp->tracing;
1430 if (tp->local_target_set !=
Qfalse) {
1431 rb_raise(rb_eArgError,
"can't disable a targeting TracePoint in a block");
1441 return RBOOL(previous_tracing);
1448 rb_tp_t *tp = tpptr(tpval);
1449 return RBOOL(tp->tracing);
1453tracepoint_enabled_p(rb_execution_context_t *ec,
VALUE tpval)
1461 VALUE tpval = tp_alloc(klass);
1469 tp->events = events;
1478 rb_thread_t *target_th = NULL;
1480 if (
RTEST(target_thval)) {
1481 target_th = rb_thread_ptr(target_thval);
1486 return tracepoint_new(rb_cTracePoint, target_th, events, func, data,
Qundef);
1490tracepoint_new_s(rb_execution_context_t *ec,
VALUE self,
VALUE args)
1497 for (i=0; i<argc; i++) {
1498 events |= symbol2event_flag(
RARRAY_AREF(args, i));
1506 rb_raise(rb_eArgError,
"must be called with a block");
1509 return tracepoint_new(self, 0, events, 0, 0,
rb_block_proc());
1513tracepoint_trace_s(rb_execution_context_t *ec,
VALUE self,
VALUE args)
1515 VALUE trace = tracepoint_new_s(ec, self, args);
1521tracepoint_inspect(rb_execution_context_t *ec,
VALUE self)
1523 rb_tp_t *tp = tpptr(self);
1527 switch (trace_arg->event) {
1533 return rb_sprintf(
"#<TracePoint:%"PRIsVALUE
" %"PRIsVALUE
":%d in '%"PRIsVALUE
"'>",
1543 return rb_sprintf(
"#<TracePoint:%"PRIsVALUE
" '%"PRIsVALUE
"' %"PRIsVALUE
":%d>",
1550 return rb_sprintf(
"#<TracePoint:%"PRIsVALUE
" %"PRIsVALUE
">",
1556 return rb_sprintf(
"#<TracePoint:%"PRIsVALUE
" %"PRIsVALUE
":%d>",
1562 return rb_sprintf(
"#<TracePoint:%s>", tp->tracing ?
"enabled" :
"disabled");
1567tracepoint_stat_event_hooks(
VALUE hash,
VALUE key, rb_event_hook_t *hook)
1569 int active = 0, deleted = 0;
1572 if (hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) {
1585tracepoint_stat_s(rb_execution_context_t *ec,
VALUE self)
1587 rb_vm_t *vm = GET_VM();
1590 tracepoint_stat_event_hooks(stat, vm->self, rb_ec_ractor_hooks(ec)->hooks);
1597disallow_reentry(
VALUE val)
1600 rb_execution_context_t *ec = GET_EC();
1601 if (ec->trace_arg != NULL) rb_bug(
"should be NULL, but %p", (
void *)ec->trace_arg);
1602 ec->trace_arg = arg;
1607tracepoint_allow_reentry(rb_execution_context_t *ec,
VALUE self)
1610 if (arg == NULL) rb_raise(
rb_eRuntimeError,
"No need to allow reentrance.");
1611 ec->trace_arg = NULL;
1615#include "trace_point.rbinc"
1645 struct ccan_list_node jnode;
1653rb_vm_memsize_workqueue(
struct ccan_list_head *workqueue)
1658 ccan_list_for_each(workqueue, work, jnode) {
1673 rb_vm_t *vm = GET_VM();
1675 if (!wq_job)
return FALSE;
1676 wq_job->func = func;
1677 wq_job->data = data;
1680 ccan_list_add_tail(&vm->workqueue, &wq_job->jnode);
1684 RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(rb_vm_main_ractor_ec(vm));
1689#define PJOB_TABLE_SIZE (sizeof(rb_atomic_t) * CHAR_BIT)
1695 } table[PJOB_TABLE_SIZE];
1699} rb_postponed_job_queues_t;
1702rb_vm_postponed_job_queue_init(rb_vm_t *vm)
1706 rb_postponed_job_queues_t *pjq = ruby_mimmalloc(
sizeof(rb_postponed_job_queues_t));
1707 pjq->triggered_bitset = 0;
1708 memset(pjq->table, 0,
sizeof(pjq->table));
1709 vm->postponed_job_queue = pjq;
1712static rb_execution_context_t *
1713get_valid_ec(rb_vm_t *vm)
1715 rb_execution_context_t *ec = rb_current_execution_context(
false);
1716 if (ec == NULL) ec = rb_vm_main_ractor_ec(vm);
1721rb_vm_postponed_job_atfork(
void)
1723 rb_vm_t *vm = GET_VM();
1724 rb_postponed_job_queues_t *pjq = vm->postponed_job_queue;
1727 if (pjq->triggered_bitset) {
1728 RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(get_valid_ec(vm));
1735rb_vm_postponed_job_free(
void)
1737 rb_vm_t *vm = GET_VM();
1738 ruby_xfree(vm->postponed_job_queue);
1739 vm->postponed_job_queue = NULL;
1745rb_vm_memsize_postponed_job_queue(
void)
1747 return sizeof(rb_postponed_job_queues_t);
1763 rb_postponed_job_queues_t *pjq = GET_VM()->postponed_job_queue;
1764 for (
unsigned int i = 0; i < PJOB_TABLE_SIZE; i++) {
1767 if (existing_func == NULL || existing_func == func) {
1782 return POSTPONED_JOB_HANDLE_INVALID;
1788 rb_vm_t *vm = GET_VM();
1789 rb_postponed_job_queues_t *pjq = vm->postponed_job_queue;
1792 RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(get_valid_ec(vm));
1802 if (h == POSTPONED_JOB_HANDLE_INVALID) {
1812 return pjob_register_legacy_impl(flags, func, data);
1818 return pjob_register_legacy_impl(flags, func, data);
1823rb_postponed_job_flush(rb_vm_t *vm)
1825 rb_postponed_job_queues_t *pjq = GET_VM()->postponed_job_queue;
1826 rb_execution_context_t *ec = GET_EC();
1827 const rb_atomic_t block_mask = POSTPONED_JOB_INTERRUPT_MASK | TRAP_INTERRUPT_MASK;
1828 volatile rb_atomic_t saved_mask = ec->interrupt_mask & block_mask;
1829 VALUE volatile saved_errno = ec->errinfo;
1830 struct ccan_list_head tmp;
1832 ccan_list_head_init(&tmp);
1835 ccan_list_append_list(&tmp, &vm->workqueue);
1842 ec->interrupt_mask |= block_mask;
1845 if (EC_EXEC_TAG() == TAG_NONE) {
1847 while (triggered_bits) {
1848 unsigned int i = bit_length(triggered_bits) - 1;
1849 triggered_bits ^= ((1UL) << i);
1851 void *data = pjq->table[i].data;
1859 void *data = wq_job->data;
1868 ec->interrupt_mask &= ~(saved_mask ^ block_mask);
1869 ec->errinfo = saved_errno;
1873 if (!ccan_list_empty(&tmp)) {
1875 ccan_list_prepend_list(&vm->workqueue, &tmp);
1878 RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(GET_EC());
1882 if (triggered_bits) {
1884 RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(GET_EC());
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
#define RUBY_ATOMIC_OR(var, val)
Atomically replaces the value pointed by var with the result of bitwise OR between val and the old value of var.
#define RUBY_ATOMIC_PTR_CAS(var, oldval, newval)
Identical to RUBY_ATOMIC_CAS, except it expects its arguments are void*.
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
#define RUBY_ATOMIC_PTR_EXCHANGE(var, val)
Identical to RUBY_ATOMIC_EXCHANGE, except it expects its arguments are void*.
#define RUBY_ATOMIC_EXCHANGE(var, val)
Atomically replaces the value pointed by var with val.
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_global_function(mid, func, arity)
Defines rb_mKernel #mid.
unsigned int rb_postponed_job_handle_t
The type of a handle returned from rb_postponed_job_preregister and passed to rb_postponed_job_trigger.
VALUE rb_tracearg_binding(rb_trace_arg_t *trace_arg)
Creates a binding object of the point where the trace is at.
void rb_postponed_job_trigger(rb_postponed_job_handle_t h)
Triggers a pre-registered job registered with rb_postponed_job_preregister, scheduling it for execution the next time Ruby checks for interrupts.
VALUE rb_tracepoint_enabled_p(VALUE tpval)
Queries if the passed TracePoint is up and running.
VALUE rb_tracearg_object(rb_trace_arg_t *trace_arg)
Queries the allocated/deallocated object that the trace represents.
VALUE rb_tracearg_callee_id(rb_trace_arg_t *trace_arg)
Identical to rb_tracearg_method_id(), except it returns callee id like rb_frame_callee().
VALUE rb_tracearg_defined_class(rb_trace_arg_t *trace_arg)
Queries the class that defines the method that the passed trace is at.
VALUE rb_tracepoint_new(VALUE target_thread_not_supported_yet, rb_event_flag_t events, void(*func)(VALUE, void *), void *data)
Creates a tracepoint by registering a callback function for one or more tracepoint events.
struct rb_trace_arg_struct rb_trace_arg_t
Type that represents a specific trace event.
VALUE rb_tracearg_raised_exception(rb_trace_arg_t *trace_arg)
Queries the raised exception that the trace represents.
void rb_thread_add_event_hook(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
Identical to rb_add_event_hook(), except its effect is limited to the passed thread.
rb_postponed_job_handle_t rb_postponed_job_preregister(unsigned int flags, rb_postponed_job_func_t func, void *data)
Pre-registers a func in Ruby's postponed job preregistration table, returning an opaque handle which can be used to trigger the job later.
VALUE rb_tracepoint_disable(VALUE tpval)
Stops (disables) an already running instance of TracePoint.
VALUE rb_tracearg_self(rb_trace_arg_t *trace_arg)
Queries the receiver of the point trace is at.
int rb_thread_remove_event_hook(VALUE thval, rb_event_hook_func_t func)
Identical to rb_remove_event_hook(), except it additionally takes a thread argument.
int rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
Identical to rb_postponed_job_register(), except it additionally checks for duplicated registration.
VALUE rb_tracearg_return_value(rb_trace_arg_t *trace_arg)
Queries the return value that the trace represents.
rb_event_flag_t rb_tracearg_event_flag(rb_trace_arg_t *trace_arg)
Queries the event of the passed trace.
VALUE rb_tracearg_path(rb_trace_arg_t *trace_arg)
Queries the file name of the point where the trace is at.
int rb_thread_remove_event_hook_with_data(VALUE thval, rb_event_hook_func_t func, VALUE data)
Identical to rb_thread_remove_event_hook(), except it additionally takes the data argument.
VALUE rb_tracepoint_enable(VALUE tpval)
Starts (enables) trace(s) defined by the passed object.
int rb_postponed_job_register(unsigned int flags, rb_postponed_job_func_t func, void *data)
Schedules the given func to be called with data when Ruby next checks for interrupts.
VALUE rb_tracearg_method_id(rb_trace_arg_t *trace_arg)
Queries the method name of the point where the trace is at.
int rb_remove_event_hook_with_data(rb_event_hook_func_t func, VALUE data)
Identical to rb_remove_event_hook(), except it additionally takes the data argument.
rb_trace_arg_t * rb_tracearg_from_tracepoint(VALUE tpval)
Queries the current event of the passed tracepoint.
VALUE rb_tracearg_lineno(rb_trace_arg_t *trace_arg)
Queries the line of the point where the trace is at.
void(* rb_postponed_job_func_t)(void *arg)
Type of postponed jobs.
VALUE rb_tracearg_event(rb_trace_arg_t *trace_arg)
Identical to rb_tracearg_event_flag(), except it returns the name of the event in Ruby's symbol.
#define RUBY_EVENT_END
Encountered an end of a class clause.
#define RUBY_EVENT_C_CALL
A method, written in C, is called.
#define RUBY_EVENT_TRACEPOINT_ALL
Bitmask of extended events.
void rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
Registers an event hook function.
#define RUBY_EVENT_RAISE
Encountered a raise statement.
#define RUBY_EVENT_B_RETURN
Encountered a next statement.
#define RUBY_EVENT_SCRIPT_COMPILED
Encountered an eval.
#define RUBY_INTERNAL_EVENT_MASK
Bitmask of internal events.
int rb_remove_event_hook(rb_event_hook_func_t func)
Removes the passed function from the list of event hooks.
#define RUBY_EVENT_ALL
Bitmask of traditional events.
#define RUBY_EVENT_THREAD_BEGIN
Encountered a new thread.
#define RUBY_EVENT_CLASS
Encountered a new class.
void(* rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
Type of event hooks.
#define RUBY_EVENT_LINE
Encountered a new line.
#define RUBY_EVENT_RETURN
Encountered a return statement.
#define RUBY_EVENT_C_RETURN
Return from a method, written in C.
#define RUBY_EVENT_B_CALL
Encountered an yield statement.
#define RUBY_INTERNAL_EVENT_FREEOBJ
Object swept.
uint32_t rb_event_flag_t
Represents event(s).
#define RUBY_EVENT_CALL
A method, written in Ruby, is called.
#define RUBY_INTERNAL_EVENT_NEWOBJ
Object allocated.
#define RUBY_EVENT_THREAD_END
Encountered an end of a thread.
#define RUBY_EVENT_RESCUE
Encountered a rescue statement.
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
int rb_block_given_p(void)
Determines if the current method is given a block.
#define rb_str_new2
Old name of rb_str_new_cstr.
#define ALLOC
Old name of RB_ALLOC.
#define xfree
Old name of ruby_xfree.
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
#define ID2SYM
Old name of RB_ID2SYM.
#define ZALLOC
Old name of RB_ZALLOC.
#define FIX2INT
Old name of RB_FIX2INT.
#define NUM2UINT
Old name of RB_NUM2UINT.
#define T_ICLASS
Old name of RUBY_T_ICLASS.
#define rb_ary_new3
Old name of rb_ary_new_from_args.
#define Qtrue
Old name of RUBY_Qtrue.
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define T_ARRAY
Old name of RUBY_T_ARRAY.
#define NIL_P
Old name of RB_NIL_P.
VALUE rb_eTypeError
TypeError exception.
VALUE rb_eRuntimeError
RuntimeError exception.
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
VALUE rb_cThread
Thread class.
#define RB_OBJ_WRITTEN(old, oldv, young)
Identical to RB_OBJ_WRITE(), except it doesn't write any values, but only a WB declaration.
#define RB_OBJ_WRITE(old, slot, young)
Declaration of a "back" pointer.
VALUE rb_funcall(VALUE recv, ID mid, int n,...)
Calls a method.
VALUE rb_hash_new(void)
Creates a new, empty hash object.
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
VALUE rb_proc_call_with_block(VALUE recv, int argc, const VALUE *argv, VALUE proc)
Identical to rb_proc_call(), except you can additionally pass another proc object, as a block.
VALUE rb_obj_is_method(VALUE recv)
Queries if the given object is a method.
VALUE rb_binding_new(void)
Snapshots the current execution context and turn it into an instance of rb_cBinding.
VALUE rb_obj_is_proc(VALUE recv)
Queries if the given object is a proc.
VALUE rb_thread_current(void)
Obtains the "current" thread.
void rb_undef_alloc_func(VALUE klass)
Deletes the allocator function of a class.
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
VALUE rb_sym2str(VALUE symbol)
Obtain a frozen string representation of a symbol (not including the leading colon).
static bool rb_ractor_shareable_p(VALUE obj)
Queries if multiple Ractors can share the passed object or not.
VALUE rb_yield(VALUE val)
Yields the block.
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
void rb_hash_foreach(VALUE q, int_type *w, VALUE e)
Iteration over the given hash.
VALUE rb_ensure(type *q, VALUE w, type *e, VALUE r)
An equivalent of ensure clause.
#define RARRAY_LEN
Just another name of rb_array_len.
#define RARRAY_AREF(a, i)
#define RBASIC(obj)
Convenient casting macro.
#define RUBY_TYPED_DEFAULT_FREE
This is a value you can set to rb_data_type_struct::dfree.
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
struct rb_data_type_struct rb_data_type_t
This is the struct that holds necessary info for a struct.
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of taking over an existing one.
#define RTEST
This is an old name of RB_TEST.
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Blocks until the current thread obtains a lock.
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Releases a lock.
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
uintptr_t VALUE
Type that represents a Ruby object.
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.