Ruby 3.4.5p51 (2025-07-16 revision 20cda200d3ce092571d0b5d342dadca69636cb0f)
gc.c
/**********************************************************************

  gc.c -

  $Author$
  created at: Tue Oct 5 09:44:46 JST 1993

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000 Information-technology Promotion Agency, Japan

**********************************************************************/

#define rb_data_object_alloc rb_data_object_alloc
#define rb_data_typed_object_alloc rb_data_typed_object_alloc

#include "ruby/internal/config.h"
#ifdef _WIN32
# include "ruby/ruby.h"
#endif

#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"
# include "wasm/machine.h"
#else
# include <setjmp.h>
#endif
#include <stdarg.h>
#include <stdio.h>

/* MALLOC_HEADERS_BEGIN */
#ifndef HAVE_MALLOC_USABLE_SIZE
# ifdef _WIN32
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) _msize(a)
# elif defined HAVE_MALLOC_SIZE
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) malloc_size(a)
# endif
#endif

#ifdef HAVE_MALLOC_USABLE_SIZE
# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
/* Alternative malloc header is included in ruby/missing.h */
# elif defined(HAVE_MALLOC_H)
#  include <malloc.h>
# elif defined(HAVE_MALLOC_NP_H)
#  include <malloc_np.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
#  include <malloc/malloc.h>
# endif
#endif

/* MALLOC_HEADERS_END */

#ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
#endif

#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#endif

#if defined _WIN32 || defined __CYGWIN__
# include <windows.h>
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)
# include <malloc.h>
#endif

#include <sys/types.h>

#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif

/* For ruby_annotate_mmap */
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif

#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */

#include "constant.h"
#include "darray.h"
#include "debug_counter.h"
#include "eval_intern.h"
#include "gc/gc.h"
#include "id_table.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/compile.h"
#include "internal/complex.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/eval.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/imemo.h"
#include "internal/io.h"
#include "internal/numeric.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "internal/rational.h"
#include "internal/sanitizers.h"
#include "internal/struct.h"
#include "internal/symbol.h"
#include "internal/thread.h"
#include "internal/variable.h"
#include "internal/warnings.h"
#include "rjit.h"
#include "probes.h"
#include "regint.h"
#include "ruby/debug.h"
#include "ruby/io.h"
#include "ruby/re.h"
#include "ruby/st.h"
#include "ruby/thread.h"
#include "ruby/util.h"
#include "ruby/vm.h"
#include "ruby_assert.h"
#include "ruby_atomic.h"
#include "symbol.h"
#include "vm_core.h"
#include "vm_sync.h"
#include "vm_callinfo.h"
#include "ractor_core.h"
#include "yjit.h"

#include "builtin.h"
#include "shape.h"

unsigned int
rb_gc_vm_lock(void)
{
    unsigned int lev;
    RB_VM_LOCK_ENTER_LEV(&lev);
    return lev;
}

void
rb_gc_vm_unlock(unsigned int lev)
{
    RB_VM_LOCK_LEAVE_LEV(&lev);
}

unsigned int
rb_gc_cr_lock(void)
{
    unsigned int lev;
    RB_VM_LOCK_ENTER_CR_LEV(GET_RACTOR(), &lev);
    return lev;
}

void
rb_gc_cr_unlock(unsigned int lev)
{
    RB_VM_LOCK_LEAVE_CR_LEV(GET_RACTOR(), &lev);
}

unsigned int
rb_gc_vm_lock_no_barrier(void)
{
    unsigned int lev = 0;
    RB_VM_LOCK_ENTER_LEV_NB(&lev);
    return lev;
}

void
rb_gc_vm_unlock_no_barrier(unsigned int lev)
{
    RB_VM_LOCK_LEAVE_LEV(&lev);
}

void
rb_gc_vm_barrier(void)
{
    rb_vm_barrier();
}

#if USE_MODULAR_GC
void *
rb_gc_get_ractor_newobj_cache(void)
{
    return GET_RACTOR()->newobj_cache;
}

void
rb_gc_initialize_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_initialize(&context->lock);
    context->ec = GET_EC();
}

void
rb_gc_worker_thread_set_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_lock(&context->lock);

    GC_ASSERT(rb_current_execution_context(false) == NULL);

#ifdef RB_THREAD_LOCAL_SPECIFIER
    rb_current_ec_set(context->ec);
#else
    native_tls_set(ruby_current_ec_key, context->ec);
#endif
}

void
rb_gc_worker_thread_unset_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_unlock(&context->lock);

    GC_ASSERT(rb_current_execution_context(true) == context->ec);

#ifdef RB_THREAD_LOCAL_SPECIFIER
    rb_current_ec_set(NULL);
#else
    native_tls_set(ruby_current_ec_key, NULL);
#endif
}
#endif

bool
rb_gc_event_hook_required_p(rb_event_flag_t event)
{
    return ruby_vm_event_flags & event;
}

void
rb_gc_event_hook(VALUE obj, rb_event_flag_t event)
{
    if (LIKELY(!rb_gc_event_hook_required_p(event))) return;

    rb_execution_context_t *ec = GET_EC();
    if (!ec->cfp) return;

    EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, obj);
}

void *
rb_gc_get_objspace(void)
{
    return GET_VM()->gc.objspace;
}

void
rb_gc_ractor_newobj_cache_foreach(void (*func)(void *cache, void *data), void *data)
{
    rb_ractor_t *r = NULL;
    if (RB_LIKELY(ruby_single_main_ractor)) {
        GC_ASSERT(
            ccan_list_empty(&GET_VM()->ractor.set) ||
            (ccan_list_top(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor &&
             ccan_list_tail(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor)
        );

        func(ruby_single_main_ractor->newobj_cache, data);
    }
    else {
        ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
            func(r->newobj_cache, data);
        }
    }
}

void
rb_gc_run_obj_finalizer(VALUE objid, long count, VALUE (*callback)(long i, void *data), void *data)
{
    /* Everything needed to resume after a raising finalizer lives in this
     * volatile struct: a longjmp back to the tag below would otherwise be
     * free to clobber locals cached in registers. */
    volatile struct {
        VALUE errinfo;
        VALUE final;
        rb_control_frame_t *cfp;
        VALUE *sp;
        long finished;
    } saved;

    rb_execution_context_t * volatile ec = GET_EC();
#define RESTORE_FINALIZER() (\
    ec->cfp = saved.cfp, \
    ec->cfp->sp = saved.sp, \
    ec->errinfo = saved.errinfo)

    saved.errinfo = ec->errinfo;
    saved.cfp = ec->cfp;
    saved.sp = ec->cfp->sp;
    saved.finished = 0;
    saved.final = Qundef;

    /* If a finalizer raises, control unwinds back to EC_EXEC_TAG(): the
     * failed entry is counted as finished, a warning is printed, and the
     * loop below resumes with the next finalizer after restoring the
     * saved frame state. */
    EC_PUSH_TAG(ec);
    enum ruby_tag_type state = EC_EXEC_TAG();
    if (state != TAG_NONE) {
        ++saved.finished; /* skip failed finalizer */

        VALUE failed_final = saved.final;
        saved.final = Qundef;
        if (!UNDEF_P(failed_final) && !NIL_P(ruby_verbose)) {
            rb_warn("Exception in finalizer %+"PRIsVALUE, failed_final);
            rb_ec_error_print(ec, ec->errinfo);
        }
    }

    for (long i = saved.finished; RESTORE_FINALIZER(), i < count; saved.finished = ++i) {
        saved.final = callback(i, data);
        rb_check_funcall(saved.final, idCall, 1, &objid);
    }
    EC_POP_TAG();
#undef RESTORE_FINALIZER
}

void
rb_gc_set_pending_interrupt(void)
{
    rb_execution_context_t *ec = GET_EC();
    ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
}

void
rb_gc_unset_pending_interrupt(void)
{
    rb_execution_context_t *ec = GET_EC();
    ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
}

bool
rb_gc_multi_ractor_p(void)
{
    return rb_multi_ractor_p();
}

bool rb_obj_is_main_ractor(VALUE gv);

bool
rb_gc_shutdown_call_finalizer_p(VALUE obj)
{
    switch (BUILTIN_TYPE(obj)) {
      case T_DATA:
        if (!ruby_free_at_exit_p() && (!DATA_PTR(obj) || !RDATA(obj)->dfree)) return false;
        if (rb_obj_is_thread(obj)) return false;
        if (rb_obj_is_mutex(obj)) return false;
        if (rb_obj_is_fiber(obj)) return false;
        if (rb_obj_is_main_ractor(obj)) return false;

        return true;

      case T_FILE:
        return true;

      case T_SYMBOL:
        if (RSYMBOL(obj)->fstr &&
                (BUILTIN_TYPE(RSYMBOL(obj)->fstr) == T_NONE ||
                 BUILTIN_TYPE(RSYMBOL(obj)->fstr) == T_ZOMBIE)) {
            RSYMBOL(obj)->fstr = 0;
        }
        return true;

      case T_NONE:
        return false;

      default:
        return ruby_free_at_exit_p();
    }
}

uint32_t
rb_gc_get_shape(VALUE obj)
{
    return (uint32_t)rb_shape_get_shape_id(obj);
}

void
rb_gc_set_shape(VALUE obj, uint32_t shape_id)
{
    rb_shape_set_shape_id(obj, (uint32_t)shape_id);
}

uint32_t
rb_gc_rebuild_shape(VALUE obj, size_t heap_id)
{
    rb_shape_t *orig_shape = rb_shape_get_shape(obj);

    if (rb_shape_obj_too_complex(obj)) return (uint32_t)OBJ_TOO_COMPLEX_SHAPE_ID;

    rb_shape_t *initial_shape = rb_shape_get_shape_by_id((shape_id_t)(heap_id + FIRST_T_OBJECT_SHAPE_ID));
    rb_shape_t *new_shape = rb_shape_traverse_from_new_root(initial_shape, orig_shape);

    if (!new_shape) return 0;

    return (uint32_t)rb_shape_id(new_shape);
}

void rb_vm_update_references(void *ptr);

#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t
#undef rb_data_object_wrap

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

#define unless_objspace(objspace) \
    void *objspace; \
    rb_vm_t *unless_objspace_vm = GET_VM(); \
    if (unless_objspace_vm) objspace = unless_objspace_vm->gc.objspace; \
    else /* return; or objspace will be warned uninitialized */

#define RMOVED(obj) ((struct RMoved *)(obj))

#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
    if (rb_gc_impl_object_moved_p((_objspace), (VALUE)(_thing))) { \
        *(_type *)&(_thing) = (_type)gc_location_internal(_objspace, (VALUE)_thing); \
    } \
} while (0)

#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
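
/* Usage sketch (illustrative, not part of the build): a compaction
 * reference-update pass over a hypothetical struct holding two VALUE
 * slots would be written as:
 *
 *     struct my_ext { VALUE name; VALUE owner; };
 *
 *     static void
 *     my_ext_update_refs(void *objspace, struct my_ext *p)
 *     {
 *         UPDATE_IF_MOVED(objspace, p->name);
 *         UPDATE_IF_MOVED(objspace, p->owner);
 *     }
 *
 * Each expansion asks rb_gc_impl_object_moved_p() whether the slot holds a
 * forwarded object and, only then, stores the new address reported by
 * gc_location_internal(). */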

#if RUBY_MARK_FREE_DEBUG
int ruby_gc_debug_indent = 0;
#endif

#ifndef RGENGC_OBJ_INFO
# define RGENGC_OBJ_INFO RGENGC_CHECK_MODE
#endif

#ifndef CALC_EXACT_MALLOC_SIZE
# define CALC_EXACT_MALLOC_SIZE 0
#endif

static size_t malloc_offset = 0;
#if defined(HAVE_MALLOC_USABLE_SIZE)
static size_t
gc_compute_malloc_offset(void)
{
    // Different allocators use different metadata storage strategies which result in different
    // ideal sizes.
    // For instance malloc(64) will waste 8B with glibc, but waste 0B with jemalloc.
    // But malloc(56) will waste 0B with glibc, but waste 8B with jemalloc.
    // So we try allocating 64, 56 and 48 bytes and select the first offset that doesn't
    // waste memory.
    // This was tested on Linux with glibc 2.35 and jemalloc 5, and for both it results in
    // no wasted memory.
    size_t offset = 0;
    for (offset = 0; offset <= 16; offset += 8) {
        size_t allocated = (64 - offset);
        void *test_ptr = malloc(allocated);
        size_t wasted = malloc_usable_size(test_ptr) - allocated;
        free(test_ptr);

        if (wasted == 0) {
            return offset;
        }
    }
    return 0;
}
#else
static size_t
gc_compute_malloc_offset(void)
{
    // If we don't have malloc_usable_size, we use powers of 2.
    return 0;
}
#endif
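
/* Worked example (assumes glibc-style size classes; illustrative only):
 * glibc serves malloc(64) from a chunk with 72 usable bytes, so 8 bytes are
 * wasted and offset 0 is rejected. malloc(56) returns a chunk whose usable
 * size is exactly 56, so the probe above settles on malloc_offset == 8, and
 * callers that request (power-of-two - 8) bytes land exactly on an
 * allocation-class boundary. */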

size_t
rb_malloc_grow_capa(size_t current, size_t type_size)
{
    size_t current_capacity = current;
    if (current_capacity < 4) {
        current_capacity = 4;
    }
    current_capacity *= type_size;

    // We double the current capacity.
    size_t new_capacity = (current_capacity * 2);

    // And round up to the next power of 2 if it's not already one.
    // (Shift a 64-bit 1 so the result is well defined for large capacities.)
    if (rb_popcount64(new_capacity) != 1) {
        new_capacity = (size_t)(1ULL << (64 - nlz_int64(new_capacity)));
    }

    new_capacity -= malloc_offset;
    new_capacity /= type_size;
    if (current > new_capacity) {
        rb_bug("rb_malloc_grow_capa: current_capacity=%zu, new_capacity=%zu, malloc_offset=%zu", current, new_capacity, malloc_offset);
    }
    RUBY_ASSERT(new_capacity > current);
    return new_capacity;
}
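
/* Worked example (illustrative): growing an array of 8-byte elements from
 * capacity 5. 5 * 8 = 40 bytes, doubled to 80, rounded up to the next power
 * of two (128), then malloc_offset (say 8 on glibc) is subtracted: 120
 * bytes, i.e. a returned capacity of 15 elements. The eventual malloc(120)
 * fills its allocation class exactly instead of wasting the tail of a
 * 128-byte chunk. */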

static inline struct rbimpl_size_mul_overflow_tag
size_add_overflow(size_t x, size_t y)
{
    size_t z;
    bool p;
#if 0

#elif defined(ckd_add)
    p = ckd_add(&z, x, y);

#elif __has_builtin(__builtin_add_overflow)
    p = __builtin_add_overflow(x, y, &z);

#elif defined(DSIZE_T)
    RB_GNUC_EXTENSION DSIZE_T dx = x;
    RB_GNUC_EXTENSION DSIZE_T dy = y;
    RB_GNUC_EXTENSION DSIZE_T dz = dx + dy;
    p = dz > SIZE_MAX;
    z = (size_t)dz;

#else
    z = x + y;
    p = z < y;

#endif
    return (struct rbimpl_size_mul_overflow_tag) { p, z, };
}

static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = size_add_overflow(t.right, z);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
}

static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
    struct rbimpl_size_mul_overflow_tag v = size_add_overflow(t.right, u.right);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
}
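
/* Sketch of the tag convention (illustrative): `left` carries the overflow
 * flag and `right` the (possibly wrapped) result, so checks compose and a
 * single test at the end covers every intermediate operation:
 *
 *     struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(n, elem_size, header);
 *     if (t.left) rb_raise(rb_eArgError, "allocation size overflow");
 *     void *buf = ruby_xmalloc(t.right);   // n * elem_size + header bytes
 *
 * Chained helpers OR the flags together, which is why the three-term
 * variant above needs no early returns. */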

PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);

static inline size_t
size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, (size_t)SIZE_MAX);
    }
}

size_t
rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    return size_mul_or_raise(x, y, exc);
}

static inline size_t
size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " + %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, z, (size_t)SIZE_MAX);
    }
}

size_t
rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    return size_mul_add_or_raise(x, y, z, exc);
}

static inline size_t
size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " + %"PRIuSIZE
            " * %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, z, w, (size_t)SIZE_MAX);
    }
}

#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
/* trick the compiler into thinking an external signal handler uses this */
volatile VALUE rb_gc_guarded_val;
volatile VALUE *
rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
{
    rb_gc_guarded_val = val;

    return ptr;
}
#endif

static const char *obj_type_name(VALUE obj);
#include "gc/default/default.c"

#if USE_MODULAR_GC && !defined(HAVE_DLOPEN)
# error "Modular GC requires dlopen"
#elif USE_MODULAR_GC
#include <dlfcn.h>

typedef struct gc_function_map {
    // Bootup
    void *(*objspace_alloc)(void);
    void (*objspace_init)(void *objspace_ptr);
    void (*objspace_free)(void *objspace_ptr);
    void *(*ractor_cache_alloc)(void *objspace_ptr, void *ractor);
    void (*ractor_cache_free)(void *objspace_ptr, void *cache);
    void (*set_params)(void *objspace_ptr);
    void (*init)(void);
    size_t *(*heap_sizes)(void *objspace_ptr);
    // Shutdown
    void (*shutdown_free_objects)(void *objspace_ptr);
    // GC
    void (*start)(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact);
    bool (*during_gc_p)(void *objspace_ptr);
    void (*prepare_heap)(void *objspace_ptr);
    void (*gc_enable)(void *objspace_ptr);
    void (*gc_disable)(void *objspace_ptr, bool finish_current_gc);
    bool (*gc_enabled_p)(void *objspace_ptr);
    VALUE (*config_get)(void *objspace_ptr);
    void (*config_set)(void *objspace_ptr, VALUE hash);
    void (*stress_set)(void *objspace_ptr, VALUE flag);
    VALUE (*stress_get)(void *objspace_ptr);
    // Object allocation
    VALUE (*new_obj)(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t alloc_size);
    size_t (*obj_slot_size)(VALUE obj);
    size_t (*heap_id_for_size)(void *objspace_ptr, size_t size);
    bool (*size_allocatable_p)(size_t size);
    // Malloc
    void *(*malloc)(void *objspace_ptr, size_t size);
    void *(*calloc)(void *objspace_ptr, size_t size);
    void *(*realloc)(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size);
    void (*free)(void *objspace_ptr, void *ptr, size_t old_size);
    void (*adjust_memory_usage)(void *objspace_ptr, ssize_t diff);
    // Marking
    void (*mark)(void *objspace_ptr, VALUE obj);
    void (*mark_and_move)(void *objspace_ptr, VALUE *ptr);
    void (*mark_and_pin)(void *objspace_ptr, VALUE obj);
    void (*mark_maybe)(void *objspace_ptr, VALUE obj);
    void (*mark_weak)(void *objspace_ptr, VALUE *ptr);
    void (*remove_weak)(void *objspace_ptr, VALUE parent_obj, VALUE *ptr);
    // Compaction
    bool (*object_moved_p)(void *objspace_ptr, VALUE obj);
    VALUE (*location)(void *objspace_ptr, VALUE value);
    // Write barriers
    void (*writebarrier)(void *objspace_ptr, VALUE a, VALUE b);
    void (*writebarrier_unprotect)(void *objspace_ptr, VALUE obj);
    void (*writebarrier_remember)(void *objspace_ptr, VALUE obj);
    // Heap walking
    void (*each_objects)(void *objspace_ptr, int (*callback)(void *, void *, size_t, void *), void *data);
    void (*each_object)(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data);
    // Finalizers
    void (*make_zombie)(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data);
    VALUE (*define_finalizer)(void *objspace_ptr, VALUE obj, VALUE block);
    void (*undefine_finalizer)(void *objspace_ptr, VALUE obj);
    void (*copy_finalizer)(void *objspace_ptr, VALUE dest, VALUE obj);
    void (*shutdown_call_finalizer)(void *objspace_ptr);
    // Object ID
    VALUE (*object_id)(void *objspace_ptr, VALUE obj);
    VALUE (*object_id_to_ref)(void *objspace_ptr, VALUE object_id);
    // Forking
    void (*before_fork)(void *objspace_ptr);
    void (*after_fork)(void *objspace_ptr, rb_pid_t pid);
    // Statistics
    void (*set_measure_total_time)(void *objspace_ptr, VALUE flag);
    bool (*get_measure_total_time)(void *objspace_ptr);
    unsigned long long (*get_total_time)(void *objspace_ptr);
    size_t (*gc_count)(void *objspace_ptr);
    VALUE (*latest_gc_info)(void *objspace_ptr, VALUE key);
    VALUE (*stat)(void *objspace_ptr, VALUE hash_or_sym);
    VALUE (*stat_heap)(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym);
    const char *(*active_gc_name)(void);
    // Miscellaneous
    size_t (*obj_flags)(void *objspace_ptr, VALUE obj, ID* flags, size_t max);
    bool (*pointer_to_heap_p)(void *objspace_ptr, const void *ptr);
    bool (*garbage_object_p)(void *objspace_ptr, VALUE obj);
    void (*set_event_hook)(void *objspace_ptr, const rb_event_flag_t event);
    void (*copy_attributes)(void *objspace_ptr, VALUE dest, VALUE obj);

    bool modular_gc_loaded_p;
} rb_gc_function_map_t;

static rb_gc_function_map_t rb_gc_functions;

# define RUBY_GC_LIBRARY "RUBY_GC_LIBRARY"
# define MODULAR_GC_DIR STRINGIZE(modular_gc_dir)

static void
ruby_modular_gc_init(void)
{
    // Assert that the directory path ends with a /
    RUBY_ASSERT_ALWAYS(MODULAR_GC_DIR[sizeof(MODULAR_GC_DIR) - 2] == '/');

    const char *gc_so_file = getenv(RUBY_GC_LIBRARY);

    rb_gc_function_map_t gc_functions = { 0 };

    char *gc_so_path = NULL;
    void *handle = NULL;
    if (gc_so_file) {
        /* Check to make sure that gc_so_file matches /[A-Za-z0-9_-]+/ so that it does
         * not load a shared object outside of the directory. */
        for (size_t i = 0; i < strlen(gc_so_file); i++) {
            char c = gc_so_file[i];
            if (isalnum(c)) continue;
            switch (c) {
              case '-':
              case '_':
                break;
              default:
                fprintf(stderr, "Only alphanumeric characters, dashes, and underscores are allowed in "RUBY_GC_LIBRARY"\n");
                exit(1);
            }
        }

        size_t gc_so_path_size = strlen(MODULAR_GC_DIR "librubygc." DLEXT) + strlen(gc_so_file) + 1;
#ifdef LOAD_RELATIVE
        Dl_info dli;
        size_t prefix_len = 0;
        if (dladdr((void *)(uintptr_t)ruby_modular_gc_init, &dli)) {
            const char *base = strrchr(dli.dli_fname, '/');
            if (base) {
                size_t tail = 0;
# define end_with_p(lit) \
    (prefix_len >= (tail = rb_strlen_lit(lit)) && \
     memcmp(base - tail, lit, tail) == 0)

                prefix_len = base - dli.dli_fname;
                if (end_with_p("/bin") || end_with_p("/lib")) {
                    prefix_len -= tail;
                }
                prefix_len += MODULAR_GC_DIR[0] != '/';
                gc_so_path_size += prefix_len;
            }
        }
#endif
        gc_so_path = alloca(gc_so_path_size);
        {
            size_t gc_so_path_idx = 0;
#define GC_SO_PATH_APPEND(str) do { \
    gc_so_path_idx += strlcpy(gc_so_path + gc_so_path_idx, str, gc_so_path_size - gc_so_path_idx); \
} while (0)
#ifdef LOAD_RELATIVE
            if (prefix_len > 0) {
                memcpy(gc_so_path, dli.dli_fname, prefix_len);
                gc_so_path_idx = prefix_len;
            }
#endif
            GC_SO_PATH_APPEND(MODULAR_GC_DIR "librubygc.");
            GC_SO_PATH_APPEND(gc_so_file);
            GC_SO_PATH_APPEND(DLEXT);
            GC_ASSERT(gc_so_path_idx == gc_so_path_size - 1);
#undef GC_SO_PATH_APPEND
        }

        handle = dlopen(gc_so_path, RTLD_LAZY | RTLD_GLOBAL);
        if (!handle) {
            fprintf(stderr, "ruby_modular_gc_init: Shared library %s cannot be opened: %s\n", gc_so_path, dlerror());
            exit(1);
        }

        gc_functions.modular_gc_loaded_p = true;
    }

# define load_modular_gc_func(name) do { \
    if (handle) { \
        const char *func_name = "rb_gc_impl_" #name; \
        gc_functions.name = dlsym(handle, func_name); \
        if (!gc_functions.name) { \
            fprintf(stderr, "ruby_modular_gc_init: %s function not exported by library %s\n", func_name, gc_so_path); \
            exit(1); \
        } \
    } \
    else { \
        gc_functions.name = rb_gc_impl_##name; \
    } \
} while (0)

    // Bootup
    load_modular_gc_func(objspace_alloc);
    load_modular_gc_func(objspace_init);
    load_modular_gc_func(objspace_free);
    load_modular_gc_func(ractor_cache_alloc);
    load_modular_gc_func(ractor_cache_free);
    load_modular_gc_func(set_params);
    load_modular_gc_func(init);
    load_modular_gc_func(heap_sizes);
    // Shutdown
    load_modular_gc_func(shutdown_free_objects);
    // GC
    load_modular_gc_func(start);
    load_modular_gc_func(during_gc_p);
    load_modular_gc_func(prepare_heap);
    load_modular_gc_func(gc_enable);
    load_modular_gc_func(gc_disable);
    load_modular_gc_func(gc_enabled_p);
    load_modular_gc_func(config_set);
    load_modular_gc_func(config_get);
    load_modular_gc_func(stress_set);
    load_modular_gc_func(stress_get);
    // Object allocation
    load_modular_gc_func(new_obj);
    load_modular_gc_func(obj_slot_size);
    load_modular_gc_func(heap_id_for_size);
    load_modular_gc_func(size_allocatable_p);
    // Malloc
    load_modular_gc_func(malloc);
    load_modular_gc_func(calloc);
    load_modular_gc_func(realloc);
    load_modular_gc_func(free);
    load_modular_gc_func(adjust_memory_usage);
    // Marking
    load_modular_gc_func(mark);
    load_modular_gc_func(mark_and_move);
    load_modular_gc_func(mark_and_pin);
    load_modular_gc_func(mark_maybe);
    load_modular_gc_func(mark_weak);
    load_modular_gc_func(remove_weak);
    // Compaction
    load_modular_gc_func(object_moved_p);
    load_modular_gc_func(location);
    // Write barriers
    load_modular_gc_func(writebarrier);
    load_modular_gc_func(writebarrier_unprotect);
    load_modular_gc_func(writebarrier_remember);
    // Heap walking
    load_modular_gc_func(each_objects);
    load_modular_gc_func(each_object);
    // Finalizers
    load_modular_gc_func(make_zombie);
    load_modular_gc_func(define_finalizer);
    load_modular_gc_func(undefine_finalizer);
    load_modular_gc_func(copy_finalizer);
    load_modular_gc_func(shutdown_call_finalizer);
    // Object ID
    load_modular_gc_func(object_id);
    load_modular_gc_func(object_id_to_ref);
    // Forking
    load_modular_gc_func(before_fork);
    load_modular_gc_func(after_fork);
    // Statistics
    load_modular_gc_func(set_measure_total_time);
    load_modular_gc_func(get_measure_total_time);
    load_modular_gc_func(get_total_time);
    load_modular_gc_func(gc_count);
    load_modular_gc_func(latest_gc_info);
    load_modular_gc_func(stat);
    load_modular_gc_func(stat_heap);
    load_modular_gc_func(active_gc_name);
    // Miscellaneous
    load_modular_gc_func(obj_flags);
    load_modular_gc_func(pointer_to_heap_p);
    load_modular_gc_func(garbage_object_p);
    load_modular_gc_func(set_event_hook);
    load_modular_gc_func(copy_attributes);

# undef load_modular_gc_func

    rb_gc_functions = gc_functions;
}
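
/* Usage sketch (illustrative): with a GC library named "mmtk" installed
 * under the configured modular GC directory, e.g.
 * <modular_gc_dir>/librubygc.mmtk.so, selecting it at startup is just:
 *
 *     $ RUBY_GC_LIBRARY=mmtk ruby script.rb
 *
 * The loader validates the name, dlopen()s the shared object, and resolves
 * every rb_gc_impl_* symbol eagerly; a missing symbol is a fatal startup
 * error rather than a lazy crash later. */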

// Bootup
# define rb_gc_impl_objspace_alloc rb_gc_functions.objspace_alloc
# define rb_gc_impl_objspace_init rb_gc_functions.objspace_init
# define rb_gc_impl_objspace_free rb_gc_functions.objspace_free
# define rb_gc_impl_ractor_cache_alloc rb_gc_functions.ractor_cache_alloc
# define rb_gc_impl_ractor_cache_free rb_gc_functions.ractor_cache_free
# define rb_gc_impl_set_params rb_gc_functions.set_params
# define rb_gc_impl_init rb_gc_functions.init
# define rb_gc_impl_heap_sizes rb_gc_functions.heap_sizes
// Shutdown
# define rb_gc_impl_shutdown_free_objects rb_gc_functions.shutdown_free_objects
// GC
# define rb_gc_impl_start rb_gc_functions.start
# define rb_gc_impl_during_gc_p rb_gc_functions.during_gc_p
# define rb_gc_impl_prepare_heap rb_gc_functions.prepare_heap
# define rb_gc_impl_gc_enable rb_gc_functions.gc_enable
# define rb_gc_impl_gc_disable rb_gc_functions.gc_disable
# define rb_gc_impl_gc_enabled_p rb_gc_functions.gc_enabled_p
# define rb_gc_impl_config_get rb_gc_functions.config_get
# define rb_gc_impl_config_set rb_gc_functions.config_set
# define rb_gc_impl_stress_set rb_gc_functions.stress_set
# define rb_gc_impl_stress_get rb_gc_functions.stress_get
// Object allocation
# define rb_gc_impl_new_obj rb_gc_functions.new_obj
# define rb_gc_impl_obj_slot_size rb_gc_functions.obj_slot_size
# define rb_gc_impl_heap_id_for_size rb_gc_functions.heap_id_for_size
# define rb_gc_impl_size_allocatable_p rb_gc_functions.size_allocatable_p
// Malloc
# define rb_gc_impl_malloc rb_gc_functions.malloc
# define rb_gc_impl_calloc rb_gc_functions.calloc
# define rb_gc_impl_realloc rb_gc_functions.realloc
# define rb_gc_impl_free rb_gc_functions.free
# define rb_gc_impl_adjust_memory_usage rb_gc_functions.adjust_memory_usage
// Marking
# define rb_gc_impl_mark rb_gc_functions.mark
# define rb_gc_impl_mark_and_move rb_gc_functions.mark_and_move
# define rb_gc_impl_mark_and_pin rb_gc_functions.mark_and_pin
# define rb_gc_impl_mark_maybe rb_gc_functions.mark_maybe
# define rb_gc_impl_mark_weak rb_gc_functions.mark_weak
# define rb_gc_impl_remove_weak rb_gc_functions.remove_weak
// Compaction
# define rb_gc_impl_object_moved_p rb_gc_functions.object_moved_p
# define rb_gc_impl_location rb_gc_functions.location
// Write barriers
# define rb_gc_impl_writebarrier rb_gc_functions.writebarrier
# define rb_gc_impl_writebarrier_unprotect rb_gc_functions.writebarrier_unprotect
# define rb_gc_impl_writebarrier_remember rb_gc_functions.writebarrier_remember
// Heap walking
# define rb_gc_impl_each_objects rb_gc_functions.each_objects
# define rb_gc_impl_each_object rb_gc_functions.each_object
// Finalizers
# define rb_gc_impl_make_zombie rb_gc_functions.make_zombie
# define rb_gc_impl_define_finalizer rb_gc_functions.define_finalizer
# define rb_gc_impl_undefine_finalizer rb_gc_functions.undefine_finalizer
# define rb_gc_impl_copy_finalizer rb_gc_functions.copy_finalizer
# define rb_gc_impl_shutdown_call_finalizer rb_gc_functions.shutdown_call_finalizer
// Object ID
# define rb_gc_impl_object_id rb_gc_functions.object_id
# define rb_gc_impl_object_id_to_ref rb_gc_functions.object_id_to_ref
// Forking
# define rb_gc_impl_before_fork rb_gc_functions.before_fork
# define rb_gc_impl_after_fork rb_gc_functions.after_fork
// Statistics
# define rb_gc_impl_set_measure_total_time rb_gc_functions.set_measure_total_time
# define rb_gc_impl_get_measure_total_time rb_gc_functions.get_measure_total_time
# define rb_gc_impl_get_total_time rb_gc_functions.get_total_time
# define rb_gc_impl_gc_count rb_gc_functions.gc_count
# define rb_gc_impl_latest_gc_info rb_gc_functions.latest_gc_info
# define rb_gc_impl_stat rb_gc_functions.stat
# define rb_gc_impl_stat_heap rb_gc_functions.stat_heap
# define rb_gc_impl_active_gc_name rb_gc_functions.active_gc_name
// Miscellaneous
# define rb_gc_impl_obj_flags rb_gc_functions.obj_flags
# define rb_gc_impl_pointer_to_heap_p rb_gc_functions.pointer_to_heap_p
# define rb_gc_impl_garbage_object_p rb_gc_functions.garbage_object_p
# define rb_gc_impl_set_event_hook rb_gc_functions.set_event_hook
# define rb_gc_impl_copy_attributes rb_gc_functions.copy_attributes
#endif

#ifdef RUBY_ASAN_ENABLED
static void
asan_death_callback(void)
{
    if (GET_VM()) {
        rb_bug_without_die("ASAN error");
    }
}
#endif

static VALUE initial_stress = Qfalse;

void *
rb_objspace_alloc(void)
{
#if USE_MODULAR_GC
    ruby_modular_gc_init();
#endif

    void *objspace = rb_gc_impl_objspace_alloc();
    ruby_current_vm_ptr->gc.objspace = objspace;

    rb_gc_impl_objspace_init(objspace);
    rb_gc_impl_stress_set(objspace, initial_stress);

#ifdef RUBY_ASAN_ENABLED
    __sanitizer_set_death_callback(asan_death_callback);
#endif

    return objspace;
}

void
rb_objspace_free(void *objspace)
{
    rb_gc_impl_objspace_free(objspace);
}

size_t
rb_gc_obj_slot_size(VALUE obj)
{
    return rb_gc_impl_obj_slot_size(obj);
}

static inline void
gc_validate_pc(void)
{
#if RUBY_DEBUG
    rb_execution_context_t *ec = GET_EC();
    const rb_control_frame_t *cfp = ec->cfp;
    if (cfp && VM_FRAME_RUBYFRAME_P(cfp) && cfp->pc) {
        RUBY_ASSERT(cfp->pc >= ISEQ_BODY(cfp->iseq)->iseq_encoded);
        RUBY_ASSERT(cfp->pc <= ISEQ_BODY(cfp->iseq)->iseq_encoded + ISEQ_BODY(cfp->iseq)->iseq_size);
    }
#endif
}

static inline VALUE
newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t size)
{
    VALUE obj = rb_gc_impl_new_obj(rb_gc_get_objspace(), cr->newobj_cache, klass, flags, v1, v2, v3, wb_protected, size);

    gc_validate_pc();

    if (UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_NEWOBJ))) {
        unsigned int lev;
        RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
        {
            memset((char *)obj + RVALUE_SIZE, 0, rb_gc_obj_slot_size(obj) - RVALUE_SIZE);

            /* We must disable GC here because the callback could call xmalloc
             * which could potentially trigger a GC, and a lot of code is unsafe
             * to trigger a GC right after an object has been allocated because
             * they perform initialization for the object and assume that the
             * GC does not trigger before then. */
            bool gc_disabled = RTEST(rb_gc_disable_no_rest());
            {
                rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_NEWOBJ);
            }
            if (!gc_disabled) rb_gc_enable();
        }
        RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
    }

    return obj;
}
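
/* Consumer-side sketch (illustrative): the NEWOBJ hook guarded above is
 * what a C extension reaches through the internal-event tracepoint API:
 *
 *     static void
 *     newobj_cb(VALUE tpval, void *data)
 *     {
 *         rb_trace_arg_t *targ = rb_tracearg_from_tracepoint(tpval);
 *         VALUE obj = rb_tracearg_object(targ);
 *         // inspect obj here; allocating objects or calling Ruby methods
 *         // is not allowed inside internal-event callbacks
 *     }
 *
 *     VALUE tp = rb_tracepoint_new(0, RUBY_INTERNAL_EVENT_NEWOBJ, newobj_cb, NULL);
 *     rb_tracepoint_enable(tp);
 *
 * The GC-disable dance above exists precisely because such callbacks run
 * while a half-initialized object is already reachable. */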

VALUE
rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(GET_RACTOR(), klass, flags, 0, 0, 0, FALSE, size);
}

VALUE
rb_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
}

#define UNEXPECTED_NODE(func) \
    rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
           BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)

static inline void
rb_data_object_check(VALUE klass)
{
    if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
        rb_undef_alloc_func(klass);
        rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
    }
}

VALUE
rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    RUBY_ASSERT_ALWAYS(dfree != (RUBY_DATA_FUNC)1);
    if (klass) rb_data_object_check(klass);
    return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, !dmark, sizeof(struct RTypedData));
}

VALUE
rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}

static VALUE
typed_data_alloc(VALUE klass, VALUE typed_flag, void *datap, const rb_data_type_t *type, size_t size)
{
    RBIMPL_NONNULL_ARG(type);
    if (klass) rb_data_object_check(klass);
    bool wb_protected = (type->flags & RUBY_FL_WB_PROTECTED) || !type->function.dmark;
    return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)type, 1 | typed_flag, (VALUE)datap, wb_protected, size);
}

VALUE
rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
{
    if (UNLIKELY(type->flags & RUBY_TYPED_EMBEDDABLE)) {
        rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
    }

    return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
}

VALUE
rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
{
    if (type->flags & RUBY_TYPED_EMBEDDABLE) {
        if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
            rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
        }

        size_t embed_size = offsetof(struct RTypedData, data) + size;
        if (rb_gc_size_allocatable_p(embed_size)) {
            VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
            memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
            return obj;
        }
    }

    VALUE obj = typed_data_alloc(klass, 0, NULL, type, sizeof(struct RTypedData));
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}
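
/* Definition sketch (illustrative, extension-side): a type that opts into
 * embedding must also promise immediate free, matching the check above:
 *
 *     static const rb_data_type_t my_type = {
 *         .wrap_struct_name = "my_type",
 *         .function = {
 *             .dmark = NULL,
 *             .dfree = RUBY_TYPED_DEFAULT_FREE,
 *             .dsize = NULL,
 *         },
 *         .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_EMBEDDABLE |
 *                  RUBY_TYPED_WB_PROTECTED,
 *     };
 *
 *     VALUE obj = rb_data_typed_object_zalloc(klass, sizeof(struct my_struct), &my_type);
 *
 * When the payload fits a heap slot it is embedded right after the
 * RTypedData header; otherwise the code falls back to a separate
 * xcalloc'ed buffer. */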

static size_t
rb_objspace_data_type_memsize(VALUE obj)
{
    size_t size = 0;
    if (RTYPEDDATA_P(obj)) {
        const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
        const void *ptr = RTYPEDDATA_GET_DATA(obj);

        if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
#ifdef HAVE_MALLOC_USABLE_SIZE
            size += malloc_usable_size((void *)ptr);
#endif
        }

        if (ptr && type->function.dsize) {
            size += type->function.dsize(ptr);
        }
    }

    return size;
}

const char *
rb_objspace_data_type_name(VALUE obj)
{
    if (RTYPEDDATA_P(obj)) {
        return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
    }
    else {
        return 0;
    }
}

static enum rb_id_table_iterator_result
cvar_table_free_i(VALUE value, void *ctx)
{
    xfree((void *)value);
    return ID_TABLE_CONTINUE;
}

static inline void
make_io_zombie(void *objspace, VALUE obj)
{
    rb_io_t *fptr = RFILE(obj)->fptr;
    rb_gc_impl_make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
}

static bool
rb_data_free(void *objspace, VALUE obj)
{
    void *data = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
    if (data) {
        int free_immediately = false;
        void (*dfree)(void *);

        if (RTYPEDDATA_P(obj)) {
            free_immediately = (RTYPEDDATA(obj)->type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
            dfree = RTYPEDDATA(obj)->type->function.dfree;
        }
        else {
            dfree = RDATA(obj)->dfree;
        }

        if (dfree) {
            if (dfree == RUBY_DEFAULT_FREE) {
                if (!RTYPEDDATA_P(obj) || !RTYPEDDATA_EMBEDDED_P(obj)) {
                    xfree(data);
                    RB_DEBUG_COUNTER_INC(obj_data_xfree);
                }
            }
            else if (free_immediately) {
                (*dfree)(data);
                if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
                    xfree(data);
                }

                RB_DEBUG_COUNTER_INC(obj_data_imm_free);
            }
            else {
                rb_gc_impl_make_zombie(rb_gc_get_objspace(), obj, dfree, data);
                RB_DEBUG_COUNTER_INC(obj_data_zombie);
                return FALSE;
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_data_empty);
        }
    }

    return true;
}

void
rb_gc_obj_free_vm_weak_references(VALUE obj)
{
    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_free_generic_ivar(obj);
        FL_UNSET(obj, FL_EXIVAR);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_STRING:
        if (FL_TEST(obj, RSTRING_FSTR)) {
            st_data_t fstr = (st_data_t)obj;
            st_delete(rb_vm_fstring_table(), &fstr, NULL);
            RB_DEBUG_COUNTER_INC(obj_str_fstr);

            FL_UNSET(obj, RSTRING_FSTR);
        }
        break;
      case T_SYMBOL:
        rb_gc_free_dsymbol(obj);
        break;
      case T_IMEMO:
        switch (imemo_type(obj)) {
          case imemo_callinfo:
            rb_vm_ci_free((const struct rb_callinfo *)obj);
            break;
          case imemo_ment:
            rb_free_method_entry_vm_weak_references((const rb_method_entry_t *)obj);
            break;
          default:
            break;
        }
        break;
      default:
        break;
    }
}

bool
rb_gc_obj_free(void *objspace, VALUE obj)
{
    RB_DEBUG_COUNTER_INC(obj_free);

    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
      case T_TRUE:
      case T_FALSE:
        rb_bug("obj_free() called for broken object");
        break;
      default:
        break;
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        if (rb_shape_obj_too_complex(obj)) {
            RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
            st_free_table(ROBJECT_IV_HASH(obj));
        }
        else if (RBASIC(obj)->flags & ROBJECT_EMBED) {
            RB_DEBUG_COUNTER_INC(obj_obj_embed);
        }
        else {
            xfree(ROBJECT(obj)->as.heap.ivptr);
            RB_DEBUG_COUNTER_INC(obj_obj_ptr);
        }
        break;
      case T_MODULE:
      case T_CLASS:
        rb_id_table_free(RCLASS_M_TBL(obj));
        rb_cc_table_free(obj);
        if (rb_shape_obj_too_complex(obj)) {
            st_free_table((st_table *)RCLASS_IVPTR(obj));
        }
        else {
            xfree(RCLASS_IVPTR(obj));
        }

        if (RCLASS_CONST_TBL(obj)) {
            rb_free_const_table(RCLASS_CONST_TBL(obj));
        }
        if (RCLASS_CVC_TBL(obj)) {
            rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
            rb_id_table_free(RCLASS_CVC_TBL(obj));
        }
        rb_class_remove_subclass_head(obj);
        rb_class_remove_from_module_subclasses(obj);
        rb_class_remove_from_super_subclasses(obj);
        if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
            xfree(RCLASS_SUPERCLASSES(obj));
        }

        (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
        (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
        break;
      case T_STRING:
        rb_str_free(obj);
        break;
      case T_ARRAY:
        rb_ary_free(obj);
        break;
      case T_HASH:
#if USE_DEBUG_COUNTER
        switch (RHASH_SIZE(obj)) {
          case 0:
            RB_DEBUG_COUNTER_INC(obj_hash_empty);
            break;
          case 1:
            RB_DEBUG_COUNTER_INC(obj_hash_1);
            break;
          case 2:
            RB_DEBUG_COUNTER_INC(obj_hash_2);
            break;
          case 3:
            RB_DEBUG_COUNTER_INC(obj_hash_3);
            break;
          case 4:
            RB_DEBUG_COUNTER_INC(obj_hash_4);
            break;
          case 5:
          case 6:
          case 7:
          case 8:
            RB_DEBUG_COUNTER_INC(obj_hash_5_8);
            break;
          default:
            GC_ASSERT(RHASH_SIZE(obj) > 8);
            RB_DEBUG_COUNTER_INC(obj_hash_g8);
        }

        if (RHASH_AR_TABLE_P(obj)) {
            if (RHASH_AR_TABLE(obj) == NULL) {
                RB_DEBUG_COUNTER_INC(obj_hash_null);
            }
            else {
                RB_DEBUG_COUNTER_INC(obj_hash_ar);
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_hash_st);
        }
#endif

        rb_hash_free(obj);
        break;
      case T_REGEXP:
        if (RREGEXP(obj)->ptr) {
            onig_free(RREGEXP(obj)->ptr);
            RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
        }
        break;
      case T_DATA:
        if (!rb_data_free(objspace, obj)) return false;
        break;
      case T_MATCH:
        {
            rb_matchext_t *rm = RMATCH_EXT(obj);
#if USE_DEBUG_COUNTER
            if (rm->regs.num_regs >= 8) {
                RB_DEBUG_COUNTER_INC(obj_match_ge8);
            }
            else if (rm->regs.num_regs >= 4) {
                RB_DEBUG_COUNTER_INC(obj_match_ge4);
            }
            else if (rm->regs.num_regs >= 1) {
                RB_DEBUG_COUNTER_INC(obj_match_under4);
            }
#endif
            onig_region_free(&rm->regs, 0);
            xfree(rm->char_offset);

            RB_DEBUG_COUNTER_INC(obj_match_ptr);
        }
        break;
      case T_FILE:
        if (RFILE(obj)->fptr) {
            make_io_zombie(objspace, obj);
            RB_DEBUG_COUNTER_INC(obj_file_ptr);
            return FALSE;
        }
        break;
      case T_RATIONAL:
        RB_DEBUG_COUNTER_INC(obj_rational);
        break;
      case T_COMPLEX:
        RB_DEBUG_COUNTER_INC(obj_complex);
        break;
      case T_MOVED:
        break;
      case T_ICLASS:
        /* Basically, T_ICLASS shares the method table with the module */
        if (RICLASS_OWNS_M_TBL_P(obj)) {
            /* Method table is not shared for origin iclasses of classes */
            rb_id_table_free(RCLASS_M_TBL(obj));
        }
        if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
            rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
        }
        rb_class_remove_subclass_head(obj);
        rb_cc_table_free(obj);
        rb_class_remove_from_module_subclasses(obj);
        rb_class_remove_from_super_subclasses(obj);

        RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
        break;

      case T_FLOAT:
        RB_DEBUG_COUNTER_INC(obj_float);
        break;

      case T_BIGNUM:
        if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
            xfree(BIGNUM_DIGITS(obj));
            RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_bignum_embed);
        }
        break;

      case T_NODE:
        UNEXPECTED_NODE(obj_free);
        break;

      case T_STRUCT:
        if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
            RSTRUCT(obj)->as.heap.ptr == NULL) {
            RB_DEBUG_COUNTER_INC(obj_struct_embed);
        }
        else {
            xfree((void *)RSTRUCT(obj)->as.heap.ptr);
            RB_DEBUG_COUNTER_INC(obj_struct_ptr);
        }
        break;

      case T_SYMBOL:
        RB_DEBUG_COUNTER_INC(obj_symbol);
        break;

      case T_IMEMO:
        rb_imemo_free((VALUE)obj);
        break;

      default:
        rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
               BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
    }

    if (FL_TEST(obj, FL_FINALIZE)) {
        rb_gc_impl_make_zombie(rb_gc_get_objspace(), obj, 0, 0);
        return FALSE;
    }
    else {
        return TRUE;
    }
}

void
rb_objspace_set_event_hook(const rb_event_flag_t event)
{
    rb_gc_impl_set_event_hook(rb_gc_get_objspace(), event);
}

static int
internal_object_p(VALUE obj)
{
    void *ptr = asan_unpoison_object_temporary(obj);

    if (RBASIC(obj)->flags) {
        switch (BUILTIN_TYPE(obj)) {
          case T_NODE:
            UNEXPECTED_NODE(internal_object_p);
            break;
          case T_NONE:
          case T_MOVED:
          case T_IMEMO:
          case T_ICLASS:
          case T_ZOMBIE:
            break;
          case T_CLASS:
            if (!RBASIC(obj)->klass) break;
            if (RCLASS_SINGLETON_P(obj)) {
                return rb_singleton_class_internal_p(obj);
            }
            return 0;
          default:
            if (!RBASIC(obj)->klass) break;
            return 0;
        }
    }
    if (ptr || !RBASIC(obj)->flags) {
        rb_asan_poison_object(obj);
    }
    return 1;
}

int
rb_objspace_internal_object_p(VALUE obj)
{
    return internal_object_p(obj);
}

struct os_each_struct {
    size_t num;
    VALUE of;
};

static int
os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
{
    struct os_each_struct *oes = (struct os_each_struct *)data;

    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (!internal_object_p(v)) {
            if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
                if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
                    rb_yield(v);
                    oes->num++;
                }
            }
        }
    }

    return 0;
}

static VALUE
os_obj_of(VALUE of)
{
    struct os_each_struct oes;

    oes.num = 0;
    oes.of = of;
    rb_objspace_each_objects(os_obj_of_i, &oes);
    return SIZET2NUM(oes.num);
}

/*
 *  call-seq:
 *     ObjectSpace.each_object([module]) {|obj| ... } -> integer
 *     ObjectSpace.each_object([module])              -> an_enumerator
 *
 *  Calls the block once for each living, nonimmediate object in this
 *  Ruby process. If <i>module</i> is specified, calls the block
 *  for only those classes or modules that match (or are a subclass of)
 *  <i>module</i>. Returns the number of objects found. Immediate
 *  objects (<code>Fixnum</code>s, <code>Symbol</code>s,
 *  <code>true</code>, <code>false</code>, and <code>nil</code>) are
 *  never returned. In the example below, #each_object returns both
 *  the numbers we defined and several constants defined in the Math
 *  module.
 *
 *  If no block is given, an enumerator is returned instead.
 *
 *      a = 102.7
 *      b = 95       # Won't be returned
 *      c = 12345678987654321
 *      count = ObjectSpace.each_object(Numeric) {|x| p x }
 *      puts "Total count: #{count}"
 *
 *  <em>produces:</em>
 *
 *      12345678987654321
 *      102.7
 *      2.71828182845905
 *      3.14159265358979
 *      2.22044604925031e-16
 *      1.7976931348623157e+308
 *      2.2250738585072e-308
 *      Total count: 7
 *
 */

static VALUE
os_each_obj(int argc, VALUE *argv, VALUE os)
{
    VALUE of;

    of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
    RETURN_ENUMERATOR(os, 1, &of);
    return os_obj_of(of);
}

/*
 *  call-seq:
 *     ObjectSpace.undefine_finalizer(obj)
 *
 *  Removes all finalizers for <i>obj</i>.
 *
 */

static VALUE
undefine_final(VALUE os, VALUE obj)
{
    return rb_undefine_finalizer(obj);
}

VALUE
rb_undefine_finalizer(VALUE obj)
{
    rb_check_frozen(obj);

    rb_gc_impl_undefine_finalizer(rb_gc_get_objspace(), obj);

    return obj;
}

static void
should_be_callable(VALUE block)
{
    if (!rb_obj_respond_to(block, idCall, TRUE)) {
        rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
                 rb_obj_class(block));
    }
}

static void
should_be_finalizable(VALUE obj)
{
    if (!FL_ABLE(obj)) {
        rb_raise(rb_eArgError, "cannot define finalizer for %s",
                 rb_obj_classname(obj));
    }
    rb_check_frozen(obj);
}

void
rb_gc_copy_finalizer(VALUE dest, VALUE obj)
{
    rb_gc_impl_copy_finalizer(rb_gc_get_objspace(), dest, obj);
}

/*
 *  call-seq:
 *     ObjectSpace.define_finalizer(obj, aProc=proc())
 *
 *  Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
 *  was destroyed. The object ID of the <i>obj</i> will be passed
 *  as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
 *  method, make sure it can be called with a single argument.
 *
 *  The return value is an array <code>[0, aProc]</code>.
 *
 *  The two recommended patterns are to either create the finalizer proc
 *  in a non-instance method where it can safely capture the needed state,
 *  or to use a custom callable object that stores the needed state
 *  explicitly as instance variables.
 *
 *      class Foo
 *        def initialize(data_needed_for_finalization)
 *          ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
 *        end
 *
 *        def self.create_finalizer(data_needed_for_finalization)
 *          proc {
 *            puts "finalizing #{data_needed_for_finalization}"
 *          }
 *        end
 *      end
 *
 *      class Bar
 *        class Remover
 *          def initialize(data_needed_for_finalization)
 *            @data_needed_for_finalization = data_needed_for_finalization
 *          end
 *
 *          def call(id)
 *            puts "finalizing #{@data_needed_for_finalization}"
 *          end
 *        end
 *
 *        def initialize(data_needed_for_finalization)
 *          ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
 *        end
 *      end
 *
 *  Note that if your finalizer references the object to be
 *  finalized it will never be run on GC, although it will still be
 *  run at exit. You will get a warning if you capture the object
 *  to be finalized as the receiver of the finalizer.
 *
 *      class CapturesSelf
 *        def initialize(name)
 *          ObjectSpace.define_finalizer(self, proc {
 *            # this finalizer will only be run on exit
 *            puts "finalizing #{name}"
 *          })
 *        end
 *      end
 *
 *  Also note that finalization can be unpredictable and is never guaranteed
 *  to be run except on exit.
 */

static VALUE
define_final(int argc, VALUE *argv, VALUE os)
{
    VALUE obj, block;

    rb_scan_args(argc, argv, "11", &obj, &block);
    if (argc == 1) {
        block = rb_block_proc();
    }

    if (rb_callable_receiver(block) == obj) {
        rb_warn("finalizer references object to be finalized");
    }

    return rb_define_finalizer(obj, block);
}

VALUE
rb_define_finalizer(VALUE obj, VALUE block)
{
    should_be_finalizable(obj);
    should_be_callable(block);

    block = rb_gc_impl_define_finalizer(rb_gc_get_objspace(), obj, block);

    block = rb_ary_new3(2, INT2FIX(0), block);
    OBJ_FREEZE(block);
    return block;
}

void
rb_objspace_call_finalizer(void)
{
    rb_gc_impl_shutdown_call_finalizer(rb_gc_get_objspace());
}

void
rb_objspace_free_objects(void *objspace)
{
    rb_gc_impl_shutdown_free_objects(objspace);
}

int
rb_objspace_garbage_object_p(VALUE obj)
{
    return rb_gc_impl_garbage_object_p(rb_gc_get_objspace(), obj);
}

bool
rb_gc_pointer_to_heap_p(VALUE obj)
{
    return rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj);
}

/*
 *  call-seq:
 *     ObjectSpace._id2ref(object_id) -> an_object
 *
 *  Converts an object id to a reference to the object. May not be
 *  called on an object id passed as a parameter to a finalizer.
 *
 *     s = "I am a string"                    #=> "I am a string"
 *     r = ObjectSpace._id2ref(s.object_id)   #=> "I am a string"
 *     r == s                                 #=> true
 *
 *  On multi-ractor mode, if the object is not shareable, it raises
 *  RangeError.
 */

static VALUE
id2ref(VALUE objid)
{
#if SIZEOF_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULONG(x)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULL(x)
#endif
    objid = rb_to_int(objid);
    if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
        VALUE ptr = NUM2PTR(objid);
        if (SPECIAL_CONST_P(ptr)) {
            if (ptr == Qtrue) return Qtrue;
            if (ptr == Qfalse) return Qfalse;
            if (NIL_P(ptr)) return Qnil;
            if (FIXNUM_P(ptr)) return ptr;
            if (FLONUM_P(ptr)) return ptr;

            if (SYMBOL_P(ptr)) {
                // Check that the symbol is valid
                if (rb_static_id_valid_p(SYM2ID(ptr))) {
                    return ptr;
                }
                else {
                    rb_raise(rb_eRangeError, "%p is not a symbol id value", (void *)ptr);
                }
            }

            rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_int2str(objid, 10));
        }
    }

    VALUE obj = rb_gc_impl_object_id_to_ref(rb_gc_get_objspace(), objid);
    if (!rb_multi_ractor_p() || rb_ractor_shareable_p(obj)) {
        return obj;
    }
    else {
        rb_raise(rb_eRangeError, "%+"PRIsVALUE" is the id of an unshareable object on multi-ractor", rb_int2str(objid, 10));
    }
}

/* :nodoc: */
static VALUE
os_id2ref(VALUE os, VALUE objid)
{
    return id2ref(objid);
}

static VALUE
rb_find_object_id(void *objspace, VALUE obj, VALUE (*get_heap_object_id)(void *, VALUE))
{
    if (SPECIAL_CONST_P(obj)) {
#if SIZEOF_LONG == SIZEOF_VOIDP
        return LONG2NUM((SIGNED_VALUE)obj);
#else
        return LL2NUM((SIGNED_VALUE)obj);
#endif
    }

    return get_heap_object_id(objspace, obj);
}

static VALUE
nonspecial_obj_id(void *_objspace, VALUE obj)
{
#if SIZEOF_LONG == SIZEOF_VOIDP
    return (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG);
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
    return LL2NUM((SIGNED_VALUE)(obj) / 2);
#else
# error not supported
#endif
}

VALUE
rb_memory_id(VALUE obj)
{
    return rb_find_object_id(NULL, obj, nonspecial_obj_id);
}

/*
 *  Document-method: __id__
 *  Document-method: object_id
 *
 *  call-seq:
 *     obj.__id__       -> integer
 *     obj.object_id    -> integer
 *
 *  Returns an integer identifier for +obj+.
 *
 *  The same number will be returned on all calls to +object_id+ for a given
 *  object, and no two active objects will share an id.
 *
 *  Note that some objects of builtin classes are reused for optimization.
 *  This is the case for immediate values and frozen string literals.
 *
 *  BasicObject implements +__id__+, Kernel implements +object_id+.
 *
 *  Immediate values are not passed by reference but are passed by value:
 *  +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
 *
 *      Object.new.object_id  == Object.new.object_id  # => false
 *      (21 * 2).object_id    == (21 * 2).object_id    # => true
 *      "hello".object_id     == "hello".object_id     # => false
 *      "hi".freeze.object_id == "hi".freeze.object_id # => true
 */

VALUE
rb_obj_id(VALUE obj)
{
    /* If obj is an immediate, the object ID is obj directly converted to a Numeric.
     * Otherwise, the object ID is a Numeric that is a non-zero multiple of
     * (RUBY_IMMEDIATE_MASK + 1) which guarantees that it does not collide with
     * any immediates. */
    return rb_find_object_id(rb_gc_get_objspace(), obj, rb_gc_impl_object_id);
}
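
/* Worked example (illustrative, typical 64-bit build): RUBY_IMMEDIATE_MASK
 * is 0x7, so IDs handed out for heap objects are non-zero multiples of 8 and
 * can never equal an immediate's tagged representation, e.g. a Fixnum n,
 * whose ID is its tagged value 2n + 1:
 *
 *     1.object_id              #=> 3
 *     Object.new.object_id % 8 #=> 0
 *
 * The exact IDs of heap objects are GC-implementation-defined; only
 * uniqueness among live objects is guaranteed. */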

static enum rb_id_table_iterator_result
cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
{
    size_t *total_size = data_ptr;
    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
    *total_size += sizeof(*ccs);
    *total_size += sizeof(ccs->entries[0]) * ccs->capa;
    return ID_TABLE_CONTINUE;
}

static size_t
cc_table_memsize(struct rb_id_table *cc_table)
{
    size_t total = rb_id_table_memsize(cc_table);
    rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
    return total;
}
1936
1937size_t
1938rb_obj_memsize_of(VALUE obj)
1939{
1940 size_t size = 0;
1941
1942 if (SPECIAL_CONST_P(obj)) {
1943 return 0;
1944 }
1945
1946 if (FL_TEST(obj, FL_EXIVAR)) {
1947 size += rb_generic_ivar_memsize(obj);
1948 }
1949
1950 switch (BUILTIN_TYPE(obj)) {
1951 case T_OBJECT:
1952 if (rb_shape_obj_too_complex(obj)) {
1953 size += rb_st_memsize(ROBJECT_IV_HASH(obj));
1954 }
1955 else if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
1956 size += ROBJECT_IV_CAPACITY(obj) * sizeof(VALUE);
1957 }
1958 break;
1959 case T_MODULE:
1960 case T_CLASS:
1961 if (RCLASS_M_TBL(obj)) {
1962 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
1963 }
1964 // class IV sizes are allocated as powers of two
1965 size += SIZEOF_VALUE << bit_length(RCLASS_IV_COUNT(obj));
1966 if (RCLASS_CVC_TBL(obj)) {
1967 size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
1968 }
1969 if (RCLASS_EXT(obj)->const_tbl) {
1970 size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
1971 }
1972 if (RCLASS_CC_TBL(obj)) {
1973 size += cc_table_memsize(RCLASS_CC_TBL(obj));
1974 }
1975 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
1976 size += (RCLASS_SUPERCLASS_DEPTH(obj) + 1) * sizeof(VALUE);
1977 }
1978 break;
1979 case T_ICLASS:
1980 if (RICLASS_OWNS_M_TBL_P(obj)) {
1981 if (RCLASS_M_TBL(obj)) {
1982 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
1983 }
1984 }
1985 if (RCLASS_CC_TBL(obj)) {
1986 size += cc_table_memsize(RCLASS_CC_TBL(obj));
1987 }
1988 break;
1989 case T_STRING:
1990 size += rb_str_memsize(obj);
1991 break;
1992 case T_ARRAY:
1993 size += rb_ary_memsize(obj);
1994 break;
1995 case T_HASH:
1996 if (RHASH_ST_TABLE_P(obj)) {
1997 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
1998 /* st_table is in the slot */
1999 size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
2000 }
2001 break;
2002 case T_REGEXP:
2003 if (RREGEXP_PTR(obj)) {
2004 size += onig_memsize(RREGEXP_PTR(obj));
2005 }
2006 break;
2007 case T_DATA:
2008 size += rb_objspace_data_type_memsize(obj);
2009 break;
2010 case T_MATCH:
2011 {
2012 rb_matchext_t *rm = RMATCH_EXT(obj);
2013 size += onig_region_memsize(&rm->regs);
2014 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
2015 }
2016 break;
2017 case T_FILE:
2018 if (RFILE(obj)->fptr) {
2019 size += rb_io_memsize(RFILE(obj)->fptr);
2020 }
2021 break;
2022 case T_RATIONAL:
2023 case T_COMPLEX:
2024 break;
2025 case T_IMEMO:
2026 size += rb_imemo_memsize(obj);
2027 break;
2028
2029 case T_FLOAT:
2030 case T_SYMBOL:
2031 break;
2032
2033 case T_BIGNUM:
2034 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
2035 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
2036 }
2037 break;
2038
2039 case T_NODE:
2040 UNEXPECTED_NODE(obj_memsize_of);
2041 break;
2042
2043 case T_STRUCT:
2044 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
2045 RSTRUCT(obj)->as.heap.ptr) {
2046 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
2047 }
2048 break;
2049
2050 case T_ZOMBIE:
2051 case T_MOVED:
2052 break;
2053
2054 default:
2055 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
2056 BUILTIN_TYPE(obj), (void*)obj);
2057 }
2058
2059 return size + rb_gc_obj_slot_size(obj);
2060}
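
/*
 * rb_obj_memsize_of() returns the slot size plus any out-of-slot memory the
 * object owns. It backs ObjectSpace.memsize_of in the objspace extension.
 * A sketch of the expected behavior (byte counts vary by build and are
 * illustrative only):
 *
 *    require "objspace"
 *    ObjectSpace.memsize_of("x")        # => slot size only (embedded string)
 *    ObjectSpace.memsize_of("x" * 1024) # => slot size + malloc'ed buffer size
 */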
2061
2062static int
2063set_zero(st_data_t key, st_data_t val, st_data_t arg)
2064{
2065 VALUE k = (VALUE)key;
2066 VALUE hash = (VALUE)arg;
2067 rb_hash_aset(hash, k, INT2FIX(0));
2068 return ST_CONTINUE;
2069}
2070
2071struct count_objects_data {
2072    size_t counts[T_MASK+1];
2073 size_t freed;
2074 size_t total;
2075};
2076
2077static void
2078count_objects_i(VALUE obj, void *d)
2079{
2080 struct count_objects_data *data = (struct count_objects_data *)d;
2081
2082 if (RBASIC(obj)->flags) {
2083 data->counts[BUILTIN_TYPE(obj)]++;
2084 }
2085 else {
2086 data->freed++;
2087 }
2088
2089 data->total++;
2090}
2091
2092/*
2093 * call-seq:
2094 * ObjectSpace.count_objects([result_hash]) -> hash
2095 *
2096 * Counts all objects grouped by type.
2097 *
2098 * It returns a hash, such as:
2099 * {
2100 * :TOTAL=>10000,
2101 * :FREE=>3011,
2102 * :T_OBJECT=>6,
2103 * :T_CLASS=>404,
2104 * # ...
2105 * }
2106 *
2107 * The contents of the returned hash are implementation specific.
2108 * They may change in the future.
2109 *
2110 * Keys starting with +:T_+ count live objects.
2111 * For example, +:T_ARRAY+ is the number of arrays.
2112 * +:FREE+ counts object slots that are not currently in use.
2113 * +:TOTAL+ is the sum of the above.
2114 *
2115 * If the optional argument +result_hash+ is given,
2116 * it is overwritten and returned. This is intended to avoid the probe effect.
2117 *
2118 * h = {}
2119 * ObjectSpace.count_objects(h)
2120 * puts h
2121 * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
2122 *
2123 * This method is only expected to work on C Ruby.
2124 *
2125 */
2126
2127static VALUE
2128count_objects(int argc, VALUE *argv, VALUE os)
2129{
2130 struct count_objects_data data = { 0 };
2131 VALUE hash = Qnil;
2132
2133 if (rb_check_arity(argc, 0, 1) == 1) {
2134 hash = argv[0];
2135 if (!RB_TYPE_P(hash, T_HASH))
2136 rb_raise(rb_eTypeError, "non-hash given");
2137 }
2138
2139 rb_gc_impl_each_object(rb_gc_get_objspace(), count_objects_i, &data);
2140
2141 if (NIL_P(hash)) {
2142 hash = rb_hash_new();
2143 }
2144 else if (!RHASH_EMPTY_P(hash)) {
2145 rb_hash_stlike_foreach(hash, set_zero, hash);
2146 }
2147 rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(data.total));
2148 rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(data.freed));
2149
2150 for (size_t i = 0; i <= T_MASK; i++) {
2151 VALUE type = type_sym(i);
2152 if (data.counts[i])
2153 rb_hash_aset(hash, type, SIZET2NUM(data.counts[i]));
2154 }
2155
2156 return hash;
2157}
2158
2159#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
2160
2161#define STACK_START (ec->machine.stack_start)
2162#define STACK_END (ec->machine.stack_end)
2163#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
2164
2165#if STACK_GROW_DIRECTION < 0
2166# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
2167#elif STACK_GROW_DIRECTION > 0
2168# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
2169#else
2170# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
2171 : (size_t)(STACK_END - STACK_START + 1))
2172#endif
2173#if !STACK_GROW_DIRECTION
2174int ruby_stack_grow_direction;
2175int
2176ruby_get_stack_grow_direction(volatile VALUE *addr)
2177{
2178 VALUE *end;
2179 SET_MACHINE_STACK_END(&end);
2180
2181 if (end > addr) return ruby_stack_grow_direction = 1;
2182 return ruby_stack_grow_direction = -1;
2183}
2184#endif
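
/*
 * The detection above relies on frame ordering: ruby_get_stack_grow_direction()
 * takes the address of a local in its own (deeper) frame and compares it with
 * an address from the caller's frame; a higher address in the deeper frame
 * means the machine stack grows upward (+1), otherwise downward (-1).
 */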
2185
2186size_t
2187ruby_stack_length(VALUE **p)
2188{
2189 rb_execution_context_t *ec = GET_EC();
2190 SET_STACK_END;
2191 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
2192 return STACK_LENGTH;
2193}
2194
2195#define PREVENT_STACK_OVERFLOW 1
2196#ifndef PREVENT_STACK_OVERFLOW
2197#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
2198# define PREVENT_STACK_OVERFLOW 1
2199#else
2200# define PREVENT_STACK_OVERFLOW 0
2201#endif
2202#endif
2203#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
2204static int
2205stack_check(rb_execution_context_t *ec, int water_mark)
2206{
2207 SET_STACK_END;
2208
2209 size_t length = STACK_LENGTH;
2210 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
2211
2212 return length > maximum_length;
2213}
2214#else
2215#define stack_check(ec, water_mark) FALSE
2216#endif
2217
2218#define STACKFRAME_FOR_CALL_CFUNC 2048
2219
2220int
2221rb_ec_stack_check(rb_execution_context_t *ec)
2222{
2223 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
2224}
2225
2226int
2227ruby_stack_check(void)
2228{
2229 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
2230}
2231
2232/* ==================== Marking ==================== */
2233
2234#define RB_GC_MARK_OR_TRAVERSE(func, obj_or_ptr, obj, check_obj) do { \
2235 if (!RB_SPECIAL_CONST_P(obj)) { \
2236 rb_vm_t *vm = GET_VM(); \
2237 void *objspace = vm->gc.objspace; \
2238 if (LIKELY(vm->gc.mark_func_data == NULL)) { \
2239 GC_ASSERT(rb_gc_impl_during_gc_p(objspace)); \
2240 (func)(objspace, (obj_or_ptr)); \
2241 } \
2242 else if (check_obj ? \
2243 rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj) && \
2244 !rb_gc_impl_garbage_object_p(objspace, obj) : \
2245 true) { \
2246 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace)); \
2247 struct gc_mark_func_data_struct *mark_func_data = vm->gc.mark_func_data; \
2248 vm->gc.mark_func_data = NULL; \
2249 mark_func_data->mark_func((obj), mark_func_data->data); \
2250 vm->gc.mark_func_data = mark_func_data; \
2251 } \
2252 } \
2253} while (0)
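
/*
 * The macro above serves two modes. During GC (mark_func_data == NULL) it
 * forwards to the GC implementation's marking function. Outside GC, a caller
 * such as rb_objspace_reachable_objects_from() installs mark_func_data, and
 * the same traversal reports each child to that callback instead of marking.
 */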
2254
2255static inline void
2256gc_mark_internal(VALUE obj)
2257{
2258 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark, obj, obj, false);
2259}
2260
2261void
2262rb_gc_mark_movable(VALUE obj)
2263{
2264 gc_mark_internal(obj);
2265}
2266
2267void
2268rb_gc_mark_and_move(VALUE *ptr)
2269{
2270 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_move, ptr, *ptr, false);
2271}
2272
2273static inline void
2274gc_mark_and_pin_internal(VALUE obj)
2275{
2276 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_pin, obj, obj, false);
2277}
2278
2279void
2280rb_gc_mark(VALUE obj)
2281{
2282 gc_mark_and_pin_internal(obj);
2283}
2284
2285static inline void
2286gc_mark_maybe_internal(VALUE obj)
2287{
2288 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_maybe, obj, obj, true);
2289}
2290
2291void
2292rb_gc_mark_maybe(VALUE obj)
2293{
2294 gc_mark_maybe_internal(obj);
2295}
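
/*
 * A minimal sketch of how these entry points are typically used from a C
 * extension's dmark callback (struct and names are hypothetical):
 *
 *    struct wrapper { VALUE pinned; VALUE movable; };
 *
 *    static void
 *    wrapper_mark(void *p)
 *    {
 *        struct wrapper *w = p;
 *        rb_gc_mark(w->pinned);          // mark and pin: safe if a raw address escapes
 *        rb_gc_mark_movable(w->movable); // mark only: fix up in dcompact via rb_gc_location()
 *    }
 */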
2296
2297void
2298rb_gc_mark_weak(VALUE *ptr)
2299{
2300 if (RB_SPECIAL_CONST_P(*ptr)) return;
2301
2302 rb_vm_t *vm = GET_VM();
2303 void *objspace = vm->gc.objspace;
2304 if (LIKELY(vm->gc.mark_func_data == NULL)) {
2305 GC_ASSERT(rb_gc_impl_during_gc_p(objspace));
2306
2307 rb_gc_impl_mark_weak(objspace, ptr);
2308 }
2309 else {
2310 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace));
2311 }
2312}
2313
2314void
2315rb_gc_remove_weak(VALUE parent_obj, VALUE *ptr)
2316{
2317 rb_gc_impl_remove_weak(rb_gc_get_objspace(), parent_obj, ptr);
2318}
2319
2320ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data));
2321static void
2322each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data)
2323{
2324 VALUE v;
2325 while (n--) {
2326 v = *x;
2327 cb(v, data);
2328 x++;
2329 }
2330}
2331
2332static void
2333each_location_ptr(const VALUE *start, const VALUE *end, void (*cb)(VALUE, void *), void *data)
2334{
2335 if (end <= start) return;
2336 each_location(start, end - start, cb, data);
2337}
2338
2339static void
2340gc_mark_maybe_each_location(VALUE obj, void *data)
2341{
2342 gc_mark_maybe_internal(obj);
2343}
2344
2345void
2346rb_gc_mark_locations(const VALUE *start, const VALUE *end)
2347{
2348 each_location_ptr(start, end, gc_mark_maybe_each_location, NULL);
2349}
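
/*
 * A sketch of conservative use: C code holding VALUEs in a local array can
 * mark the whole half-open range [start, end) (hypothetical example):
 *
 *    VALUE buf[16];
 *    // ... fill buf ...
 *    rb_gc_mark_locations(buf, buf + 16);
 */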
2350
2351void
2352rb_gc_mark_values(long n, const VALUE *values)
2353{
2354 for (long i = 0; i < n; i++) {
2355 gc_mark_internal(values[i]);
2356 }
2357}
2358
2359void
2360rb_gc_mark_vm_stack_values(long n, const VALUE *values)
2361{
2362 for (long i = 0; i < n; i++) {
2363 gc_mark_and_pin_internal(values[i]);
2364 }
2365}
2366
2367static int
2368mark_key(st_data_t key, st_data_t value, st_data_t data)
2369{
2370 gc_mark_and_pin_internal((VALUE)key);
2371
2372 return ST_CONTINUE;
2373}
2374
2375void
2376rb_mark_set(st_table *tbl)
2377{
2378 if (!tbl) return;
2379
2380 st_foreach(tbl, mark_key, (st_data_t)rb_gc_get_objspace());
2381}
2382
2383static int
2384mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
2385{
2386 gc_mark_internal((VALUE)key);
2387 gc_mark_internal((VALUE)value);
2388
2389 return ST_CONTINUE;
2390}
2391
2392static int
2393pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
2394{
2395 gc_mark_and_pin_internal((VALUE)key);
2396 gc_mark_and_pin_internal((VALUE)value);
2397
2398 return ST_CONTINUE;
2399}
2400
2401static int
2402pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
2403{
2404 gc_mark_and_pin_internal((VALUE)key);
2405 gc_mark_internal((VALUE)value);
2406
2407 return ST_CONTINUE;
2408}
2409
2410static void
2411mark_hash(VALUE hash)
2412{
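    /* An identity hash keys on object addresses, so a key that moved during
     * compaction would hash into the wrong bin; therefore pin the keys (but
     * not the values) of compare-by-identity hashes. */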
2413 if (rb_hash_compare_by_id_p(hash)) {
2414 rb_hash_stlike_foreach(hash, pin_key_mark_value, 0);
2415 }
2416 else {
2417 rb_hash_stlike_foreach(hash, mark_keyvalue, 0);
2418 }
2419
2420 gc_mark_internal(RHASH(hash)->ifnone);
2421}
2422
2423void
2424rb_mark_hash(st_table *tbl)
2425{
2426 if (!tbl) return;
2427
2428 st_foreach(tbl, pin_key_pin_value, 0);
2429}
2430
2431static enum rb_id_table_iterator_result
2432mark_method_entry_i(VALUE me, void *objspace)
2433{
2434 gc_mark_internal(me);
2435
2436 return ID_TABLE_CONTINUE;
2437}
2438
2439static void
2440mark_m_tbl(void *objspace, struct rb_id_table *tbl)
2441{
2442 if (tbl) {
2443 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
2444 }
2445}
2446
2447#if STACK_GROW_DIRECTION < 0
2448#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
2449#elif STACK_GROW_DIRECTION > 0
2450#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
2451#else
2452#define GET_STACK_BOUNDS(start, end, appendix) \
2453 ((STACK_END < STACK_START) ? \
2454 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
2455#endif
2456
2457static void
2458gc_mark_machine_stack_location_maybe(VALUE obj, void *data)
2459{
2460 gc_mark_maybe_internal(obj);
2461
2462#ifdef RUBY_ASAN_ENABLED
2463 const rb_execution_context_t *ec = (const rb_execution_context_t *)data;
2464 void *fake_frame_start;
2465 void *fake_frame_end;
2466 bool is_fake_frame = asan_get_fake_stack_extents(
2467 ec->machine.asan_fake_stack_handle, obj,
2468 ec->machine.stack_start, ec->machine.stack_end,
2469 &fake_frame_start, &fake_frame_end
2470 );
2471 if (is_fake_frame) {
2472 each_location_ptr(fake_frame_start, fake_frame_end, gc_mark_maybe_each_location, NULL);
2473 }
2474#endif
2475}
2476
2477static VALUE
2478gc_location_internal(void *objspace, VALUE value)
2479{
2480 if (SPECIAL_CONST_P(value)) {
2481 return value;
2482 }
2483
2484 GC_ASSERT(rb_gc_impl_pointer_to_heap_p(objspace, (void *)value));
2485
2486 return rb_gc_impl_location(objspace, value);
2487}
2488
2489VALUE
2490rb_gc_location(VALUE value)
2491{
2492 return gc_location_internal(rb_gc_get_objspace(), value);
2493}
2494
2495#if defined(__wasm__)
2496
2497
2498static VALUE *rb_stack_range_tmp[2];
2499
2500static void
2501rb_mark_locations(void *begin, void *end)
2502{
2503 rb_stack_range_tmp[0] = begin;
2504 rb_stack_range_tmp[1] = end;
2505}
2506
2507void
2508rb_gc_save_machine_context(void)
2509{
2510 // no-op
2511}
2512
2513# if defined(__EMSCRIPTEN__)
2514
2515static void
2516mark_current_machine_context(const rb_execution_context_t *ec)
2517{
2518 emscripten_scan_stack(rb_mark_locations);
2519 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2520
2521 emscripten_scan_registers(rb_mark_locations);
2522 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2523}
2524# else // use Asyncify version
2525
2526static void
2527mark_current_machine_context(const rb_execution_context_t *ec)
2528{
2529 VALUE *stack_start, *stack_end;
2530 SET_STACK_END;
2531 GET_STACK_BOUNDS(stack_start, stack_end, 1);
2532 each_location_ptr(stack_start, stack_end, gc_mark_maybe_each_location, NULL);
2533
2534 rb_wasm_scan_locals(rb_mark_locations);
2535 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2536}
2537
2538# endif
2539
2540#else // !defined(__wasm__)
2541
2542void
2543rb_gc_save_machine_context(void)
2544{
2545 rb_thread_t *thread = GET_THREAD();
2546
2547 RB_VM_SAVE_MACHINE_CONTEXT(thread);
2548}
2549
2550
2551static void
2552mark_current_machine_context(const rb_execution_context_t *ec)
2553{
2554 rb_gc_mark_machine_context(ec);
2555}
2556#endif
2557
2558void
2559rb_gc_mark_machine_context(const rb_execution_context_t *ec)
2560{
2561 VALUE *stack_start, *stack_end;
2562
2563 GET_STACK_BOUNDS(stack_start, stack_end, 0);
2564 RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);
2565
2566 void *data =
2567#ifdef RUBY_ASAN_ENABLED
2568 /* gc_mark_machine_stack_location_maybe() uses data as const */
2569 (rb_execution_context_t *)ec;
2570#else
2571 NULL;
2572#endif
2573
2574 each_location_ptr(stack_start, stack_end, gc_mark_machine_stack_location_maybe, data);
2575 int num_regs = sizeof(ec->machine.regs)/(sizeof(VALUE));
2576 each_location((VALUE*)&ec->machine.regs, num_regs, gc_mark_machine_stack_location_maybe, data);
2577}
2578
2579static int
2580rb_mark_tbl_i(st_data_t key, st_data_t value, st_data_t data)
2581{
2582 gc_mark_and_pin_internal((VALUE)value);
2583
2584 return ST_CONTINUE;
2585}
2586
2587void
2588rb_mark_tbl(st_table *tbl)
2589{
2590 if (!tbl || tbl->num_entries == 0) return;
2591
2592 st_foreach(tbl, rb_mark_tbl_i, 0);
2593}
2594
2595static void
2596gc_mark_tbl_no_pin(st_table *tbl)
2597{
2598 if (!tbl || tbl->num_entries == 0) return;
2599
2600 st_foreach(tbl, gc_mark_tbl_no_pin_i, 0);
2601}
2602
2603void
2604rb_mark_tbl_no_pin(st_table *tbl)
2605{
2606 gc_mark_tbl_no_pin(tbl);
2607}
2608
2609static enum rb_id_table_iterator_result
2610mark_cvc_tbl_i(VALUE cvc_entry, void *objspace)
2611{
2612 struct rb_cvar_class_tbl_entry *entry;
2613
2614 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
2615
2616 RUBY_ASSERT(entry->cref == 0 || (BUILTIN_TYPE((VALUE)entry->cref) == T_IMEMO && IMEMO_TYPE_P(entry->cref, imemo_cref)));
2617 gc_mark_internal((VALUE)entry->cref);
2618
2619 return ID_TABLE_CONTINUE;
2620}
2621
2622static void
2623mark_cvc_tbl(void *objspace, VALUE klass)
2624{
2625 struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
2626 if (tbl) {
2627 rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
2628 }
2629}
2630
2631static bool
2632gc_declarative_marking_p(const rb_data_type_t *type)
2633{
2634 return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
2635}
2636
2637static enum rb_id_table_iterator_result
2638mark_const_table_i(VALUE value, void *objspace)
2639{
2640 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
2641
2642 gc_mark_internal(ce->value);
2643 gc_mark_internal(ce->file);
2644
2645 return ID_TABLE_CONTINUE;
2646}
2647
2648void
2649rb_gc_mark_roots(void *objspace, const char **categoryp)
2650{
2651 rb_execution_context_t *ec = GET_EC();
2652 rb_vm_t *vm = rb_ec_vm_ptr(ec);
2653
2654#define MARK_CHECKPOINT(category) do { \
2655 if (categoryp) *categoryp = category; \
2656} while (0)
2657
2658 MARK_CHECKPOINT("vm");
2659 rb_vm_mark(vm);
2660 if (vm->self) gc_mark_internal(vm->self);
2661
2662 MARK_CHECKPOINT("end_proc");
2663 rb_mark_end_proc();
2664
2665 MARK_CHECKPOINT("global_tbl");
2666 rb_gc_mark_global_tbl();
2667
2668#if USE_YJIT
2669 void rb_yjit_root_mark(void); // in Rust
2670
2671 if (rb_yjit_enabled_p) {
2672 MARK_CHECKPOINT("YJIT");
2673 rb_yjit_root_mark();
2674 }
2675#endif
2676
2677 MARK_CHECKPOINT("machine_context");
2678 mark_current_machine_context(ec);
2679
2680 MARK_CHECKPOINT("finish");
2681
2682#undef MARK_CHECKPOINT
2683}
2684
2685#define TYPED_DATA_REFS_OFFSET_LIST(d) (size_t *)(uintptr_t)RTYPEDDATA(d)->type->function.dmark
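
/*
 * With RUBY_TYPED_DECL_MARKING, dmark is not a function at all: it is
 * reinterpreted (via the cast above) as a RUBY_REF_END-terminated list of
 * offsets to VALUE fields. A minimal sketch (hypothetical type, not part of
 * this file):
 *
 *    struct sample { VALUE name; VALUE items; long count; };
 *
 *    static const size_t sample_refs[] = {
 *        offsetof(struct sample, name),
 *        offsetof(struct sample, items),
 *        RUBY_REF_END,
 *    };
 *
 *    static const rb_data_type_t sample_type = {
 *        .wrap_struct_name = "sample",
 *        .function = {
 *            .dmark = (RUBY_DATA_FUNC)(uintptr_t)sample_refs,
 *            .dfree = RUBY_TYPED_DEFAULT_FREE,
 *        },
 *        .flags = RUBY_TYPED_DECL_MARKING | RUBY_TYPED_FREE_IMMEDIATELY,
 *    };
 */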
2686
2687void
2688rb_gc_mark_children(void *objspace, VALUE obj)
2689{
2690 if (FL_TEST(obj, FL_EXIVAR)) {
2691 rb_mark_generic_ivar(obj);
2692 }
2693
2694 switch (BUILTIN_TYPE(obj)) {
2695 case T_FLOAT:
2696 case T_BIGNUM:
2697 case T_SYMBOL:
2698        /* Not immediates, but they have no references and no singleton class.
2699 *
2700 * RSYMBOL(obj)->fstr intentionally not marked. See log for 96815f1e
2701 * ("symbol.c: remove rb_gc_mark_symbols()") */
2702 return;
2703
2704 case T_NIL:
2705 case T_FIXNUM:
2706 rb_bug("rb_gc_mark() called for broken object");
2707 break;
2708
2709 case T_NODE:
2710 UNEXPECTED_NODE(rb_gc_mark);
2711 break;
2712
2713 case T_IMEMO:
2714 rb_imemo_mark_and_move(obj, false);
2715 return;
2716
2717 default:
2718 break;
2719 }
2720
2721 gc_mark_internal(RBASIC(obj)->klass);
2722
2723 switch (BUILTIN_TYPE(obj)) {
2724 case T_CLASS:
2725 if (FL_TEST(obj, FL_SINGLETON)) {
2726 gc_mark_internal(RCLASS_ATTACHED_OBJECT(obj));
2727 }
2728 // Continue to the shared T_CLASS/T_MODULE
2729 case T_MODULE:
2730 if (RCLASS_SUPER(obj)) {
2731 gc_mark_internal(RCLASS_SUPER(obj));
2732 }
2733
2734 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
2735 mark_cvc_tbl(objspace, obj);
2736 rb_cc_table_mark(obj);
2737 if (rb_shape_obj_too_complex(obj)) {
2738 gc_mark_tbl_no_pin((st_table *)RCLASS_IVPTR(obj));
2739 }
2740 else {
2741 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
2742 gc_mark_internal(RCLASS_IVPTR(obj)[i]);
2743 }
2744 }
2745
2746 if (RCLASS_CONST_TBL(obj)) {
2747 rb_id_table_foreach_values(RCLASS_CONST_TBL(obj), mark_const_table_i, objspace);
2748 }
2749
2750 gc_mark_internal(RCLASS_EXT(obj)->classpath);
2751 break;
2752
2753 case T_ICLASS:
2754 if (RICLASS_OWNS_M_TBL_P(obj)) {
2755 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
2756 }
2757 if (RCLASS_SUPER(obj)) {
2758 gc_mark_internal(RCLASS_SUPER(obj));
2759 }
2760
2761 if (RCLASS_INCLUDER(obj)) {
2762 gc_mark_internal(RCLASS_INCLUDER(obj));
2763 }
2764 mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
2765 rb_cc_table_mark(obj);
2766 break;
2767
2768 case T_ARRAY:
2769 if (ARY_SHARED_P(obj)) {
2770 VALUE root = ARY_SHARED_ROOT(obj);
2771 gc_mark_internal(root);
2772 }
2773 else {
2774 long len = RARRAY_LEN(obj);
2775 const VALUE *ptr = RARRAY_CONST_PTR(obj);
2776 for (long i = 0; i < len; i++) {
2777 gc_mark_internal(ptr[i]);
2778 }
2779 }
2780 break;
2781
2782 case T_HASH:
2783 mark_hash(obj);
2784 break;
2785
2786 case T_STRING:
2787 if (STR_SHARED_P(obj)) {
2788 if (STR_EMBED_P(RSTRING(obj)->as.heap.aux.shared)) {
2789 /* Embedded shared strings cannot be moved because this string
2790 * points into the slot of the shared string. There may be code
2791 * using the RSTRING_PTR on the stack, which would pin this
2792 * string but not pin the shared string, causing it to move. */
2793 gc_mark_and_pin_internal(RSTRING(obj)->as.heap.aux.shared);
2794 }
2795 else {
2796 gc_mark_internal(RSTRING(obj)->as.heap.aux.shared);
2797 }
2798 }
2799 break;
2800
2801 case T_DATA: {
2802 void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
2803
2804 if (ptr) {
2805 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(RTYPEDDATA(obj)->type)) {
2806 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
2807
2808 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
2809 gc_mark_internal(*(VALUE *)((char *)ptr + offset));
2810 }
2811 }
2812 else {
2813 RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
2814 RTYPEDDATA(obj)->type->function.dmark :
2815 RDATA(obj)->dmark;
2816 if (mark_func) (*mark_func)(ptr);
2817 }
2818 }
2819
2820 break;
2821 }
2822
2823 case T_OBJECT: {
2824 rb_shape_t *shape = rb_shape_get_shape_by_id(ROBJECT_SHAPE_ID(obj));
2825
2826 if (rb_shape_obj_too_complex(obj)) {
2827 gc_mark_tbl_no_pin(ROBJECT_IV_HASH(obj));
2828 }
2829 else {
2830 const VALUE * const ptr = ROBJECT_IVPTR(obj);
2831
2832 uint32_t len = ROBJECT_IV_COUNT(obj);
2833 for (uint32_t i = 0; i < len; i++) {
2834 gc_mark_internal(ptr[i]);
2835 }
2836 }
2837
2838 if (shape) {
2839 VALUE klass = RBASIC_CLASS(obj);
2840
2841 // Increment max_iv_count if applicable, used to determine size pool allocation
2842 attr_index_t num_of_ivs = shape->next_iv_index;
2843 if (RCLASS_EXT(klass)->max_iv_count < num_of_ivs) {
2844 RCLASS_EXT(klass)->max_iv_count = num_of_ivs;
2845 }
2846 }
2847
2848 break;
2849 }
2850
2851 case T_FILE:
2852 if (RFILE(obj)->fptr) {
2853 gc_mark_internal(RFILE(obj)->fptr->self);
2854 gc_mark_internal(RFILE(obj)->fptr->pathv);
2855 gc_mark_internal(RFILE(obj)->fptr->tied_io_for_writing);
2856 gc_mark_internal(RFILE(obj)->fptr->writeconv_asciicompat);
2857 gc_mark_internal(RFILE(obj)->fptr->writeconv_pre_ecopts);
2858 gc_mark_internal(RFILE(obj)->fptr->encs.ecopts);
2859 gc_mark_internal(RFILE(obj)->fptr->write_lock);
2860 gc_mark_internal(RFILE(obj)->fptr->timeout);
2861 }
2862 break;
2863
2864 case T_REGEXP:
2865 gc_mark_internal(RREGEXP(obj)->src);
2866 break;
2867
2868 case T_MATCH:
2869 gc_mark_internal(RMATCH(obj)->regexp);
2870 if (RMATCH(obj)->str) {
2871 gc_mark_internal(RMATCH(obj)->str);
2872 }
2873 break;
2874
2875 case T_RATIONAL:
2876 gc_mark_internal(RRATIONAL(obj)->num);
2877 gc_mark_internal(RRATIONAL(obj)->den);
2878 break;
2879
2880 case T_COMPLEX:
2881 gc_mark_internal(RCOMPLEX(obj)->real);
2882 gc_mark_internal(RCOMPLEX(obj)->imag);
2883 break;
2884
2885 case T_STRUCT: {
2886 const long len = RSTRUCT_LEN(obj);
2887 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
2888
2889 for (long i = 0; i < len; i++) {
2890 gc_mark_internal(ptr[i]);
2891 }
2892
2893 break;
2894 }
2895
2896 default:
2897 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
2898 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
2899 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
2900 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
2901 BUILTIN_TYPE(obj), (void *)obj,
2902 rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj) ? "corrupted object" : "non object");
2903 }
2904}
2905
2906size_t
2907rb_gc_obj_optimal_size(VALUE obj)
2908{
2909 switch (BUILTIN_TYPE(obj)) {
2910 case T_ARRAY:
2911 return rb_ary_size_as_embedded(obj);
2912
2913 case T_OBJECT:
2914 if (rb_shape_obj_too_complex(obj)) {
2915 return sizeof(struct RObject);
2916 }
2917 else {
2918 return rb_obj_embedded_size(ROBJECT_IV_CAPACITY(obj));
2919 }
2920
2921 case T_STRING:
2922 return rb_str_size_as_embedded(obj);
2923
2924 case T_HASH:
2925 return sizeof(struct RHash) + (RHASH_ST_TABLE_P(obj) ? sizeof(st_table) : sizeof(ar_table));
2926
2927 default:
2928 return 0;
2929 }
2930}
2931
2932void
2933rb_gc_writebarrier(VALUE a, VALUE b)
2934{
2935 rb_gc_impl_writebarrier(rb_gc_get_objspace(), a, b);
2936}
2937
2938void
2939rb_gc_writebarrier_unprotect(VALUE obj)
2940{
2941 rb_gc_impl_writebarrier_unprotect(rb_gc_get_objspace(), obj);
2942}
2943
2944/*
2945 * remember `obj' if needed.
2946 */
2947void
2948rb_gc_writebarrier_remember(VALUE obj)
2949{
2950 rb_gc_impl_writebarrier_remember(rb_gc_get_objspace(), obj);
2951}
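
/*
 * Extension code normally does not call the write barrier directly; it goes
 * through RB_OBJ_WRITE(), which stores the reference and fires the barrier.
 * A sketch (hypothetical struct): writing the slot directly would let an old
 * `self` hold an unrecorded reference to a young `child`, which a minor GC
 * could then sweep.
 *
 *    struct holder { VALUE child; };
 *
 *    static void
 *    holder_set(VALUE self, struct holder *h, VALUE child)
 *    {
 *        RB_OBJ_WRITE(self, &h->child, child);  // not: h->child = child;
 *    }
 */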
2952
2953void
2954rb_gc_copy_attributes(VALUE dest, VALUE obj)
2955{
2956 rb_gc_impl_copy_attributes(rb_gc_get_objspace(), dest, obj);
2957}
2958
2959int
2960rb_gc_modular_gc_loaded_p(void)
2961{
2962#if USE_MODULAR_GC
2963 return rb_gc_functions.modular_gc_loaded_p;
2964#else
2965 return false;
2966#endif
2967}
2968
2969const char *
2970rb_gc_active_gc_name(void)
2971{
2972 const char *gc_name = rb_gc_impl_active_gc_name();
2973
2974 const size_t len = strlen(gc_name);
2975 if (len > RB_GC_MAX_NAME_LEN) {
2976 rb_bug("GC should have a name no more than %d chars long. Currently: %zu (%s)",
2977 RB_GC_MAX_NAME_LEN, len, gc_name);
2978 }
2979
2980 return gc_name;
2981}
2982
2983// TODO: rearchitect this function to work for a generic GC
2984size_t
2985rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
2986{
2987 return rb_gc_impl_obj_flags(rb_gc_get_objspace(), obj, flags, max);
2988}
2989
2990/* GC */
2991
2992void *
2993rb_gc_ractor_cache_alloc(rb_ractor_t *ractor)
2994{
2995 return rb_gc_impl_ractor_cache_alloc(rb_gc_get_objspace(), ractor);
2996}
2997
2998void
2999rb_gc_ractor_cache_free(void *cache)
3000{
3001 rb_gc_impl_ractor_cache_free(rb_gc_get_objspace(), cache);
3002}
3003
3004void
3005rb_gc_register_mark_object(VALUE obj)
3006{
3007 if (!rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj))
3008 return;
3009
3010 rb_vm_register_global_object(obj);
3011}
3012
3013void
3014rb_gc_register_address(VALUE *addr)
3015{
3016 rb_vm_t *vm = GET_VM();
3017
3018 VALUE obj = *addr;
3019
3020 struct global_object_list *tmp = ALLOC(struct global_object_list);
3021 tmp->next = vm->global_object_list;
3022 tmp->varptr = addr;
3023 vm->global_object_list = tmp;
3024
3025 /*
3026 * Because some C extensions have assignment-then-register bugs,
3027     * we defensively guard `obj` here so that it does not get swept.
3028 */
3029 RB_GC_GUARD(obj);
3030 if (0 && !SPECIAL_CONST_P(obj)) {
3031 rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
3032 rb_obj_class(obj));
3033 rb_print_backtrace(stderr);
3034 }
3035}
3036
3037void
3038rb_gc_unregister_address(VALUE *addr)
3039{
3040 rb_vm_t *vm = GET_VM();
3041 struct global_object_list *tmp = vm->global_object_list;
3042
3043 if (tmp->varptr == addr) {
3044 vm->global_object_list = tmp->next;
3045 xfree(tmp);
3046 return;
3047 }
3048 while (tmp->next) {
3049 if (tmp->next->varptr == addr) {
3050 struct global_object_list *t = tmp->next;
3051
3052 tmp->next = tmp->next->next;
3053 xfree(t);
3054 break;
3055 }
3056 tmp = tmp->next;
3057 }
3058}
3059
3060void
3061rb_global_variable(VALUE *var)
3062{
3063 rb_gc_register_address(var);
3064}
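
/*
 * A sketch of typical extension usage (hypothetical names): a C global
 * holding a VALUE must be registered as a GC root, or it may be swept:
 *
 *    static VALUE cache;
 *
 *    void
 *    Init_myext(void)
 *    {
 *        cache = rb_ary_new();
 *        rb_gc_register_address(&cache);  // or rb_global_variable(&cache)
 *    }
 */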
3065
3066static VALUE
3067gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
3068{
3069 rb_gc_impl_start(rb_gc_get_objspace(), RTEST(full_mark), RTEST(immediate_mark), RTEST(immediate_sweep), RTEST(compact));
3070
3071 return Qnil;
3072}
3073
3074/*
3075 * rb_objspace_each_objects() is a special C API for walking through the
3076 * Ruby object space. This C API is difficult to use correctly; frankly,
3077 * you should not use it unless you have read the source code of this
3078 * function and understand exactly what it does.
3079 *
3080 * 'callback' will be called several times (once per heap page in the
3081 * current implementation) with:
3082 *   vstart: a pointer to the first living object of the heap_page.
3083 *   vend: a pointer to just past the valid heap_page area.
3084 *   stride: the distance to the next VALUE.
3085 *
3086 * If callback() returns non-zero, the iteration is stopped.
3087 *
3088 * This is sample callback code that iterates over live objects:
3089 *
3090 * static int
3091 * sample_callback(void *vstart, void *vend, int stride, void *data)
3092 * {
3093 * VALUE v = (VALUE)vstart;
3094 * for (; v != (VALUE)vend; v += stride) {
3095 * if (!rb_objspace_internal_object_p(v)) { // liveness check
3096 * // do something with live object 'v'
3097 * }
3098 * }
3099 *       return 0; // continue the iteration
3100 * }
3101 *
3102 * Note: 'vstart' is not the top of the heap_page. It points at the first
3103 * living object, so that at least one object is grasped and GC issues
3104 * are avoided. This means that you cannot walk through all Ruby object
3105 * pages, including freed object pages.
3106 *
3107 * Note: In this implementation, 'stride' is the same as sizeof(RVALUE).
3108 * However, variable values may be passed as 'stride' for various
3109 * reasons, so you must use 'stride' instead of a constant value in
3110 * the iteration.
3111 */
3112void
3113rb_objspace_each_objects(int (*callback)(void *, void *, size_t, void *), void *data)
3114{
3115 rb_gc_impl_each_objects(rb_gc_get_objspace(), callback, data);
3116}
3117
3118static void
3119gc_ref_update_array(void *objspace, VALUE v)
3120{
3121 if (ARY_SHARED_P(v)) {
3122 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
3123
3124 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
3125
3126 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
3127 // If the root is embedded and its location has changed
3128 if (ARY_EMBED_P(new_root) && new_root != old_root) {
3129 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
3130 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
3131 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
3132 }
3133 }
3134 else {
3135 long len = RARRAY_LEN(v);
3136
3137 if (len > 0) {
3138 VALUE *ptr = (VALUE *)RARRAY_CONST_PTR(v);
3139 for (long i = 0; i < len; i++) {
3140 UPDATE_IF_MOVED(objspace, ptr[i]);
3141 }
3142 }
3143
3144 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
3145 if (rb_ary_embeddable_p(v)) {
3146 rb_ary_make_embedded(v);
3147 }
3148 }
3149 }
3150}
3151
3152static void
3153gc_ref_update_object(void *objspace, VALUE v)
3154{
3155 VALUE *ptr = ROBJECT_IVPTR(v);
3156
3157 if (rb_shape_obj_too_complex(v)) {
3158 gc_ref_update_table_values_only(ROBJECT_IV_HASH(v));
3159 return;
3160 }
3161
3162 size_t slot_size = rb_gc_obj_slot_size(v);
3163 size_t embed_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(v));
3164 if (slot_size >= embed_size && !RB_FL_TEST_RAW(v, ROBJECT_EMBED)) {
3165 // Object can be re-embedded
3166 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_IV_COUNT(v));
3167 RB_FL_SET_RAW(v, ROBJECT_EMBED);
3168 xfree(ptr);
3169 ptr = ROBJECT(v)->as.ary;
3170 }
3171
3172 for (uint32_t i = 0; i < ROBJECT_IV_COUNT(v); i++) {
3173 UPDATE_IF_MOVED(objspace, ptr[i]);
3174 }
3175}
3176
3177void
3178rb_gc_ref_update_table_values_only(st_table *tbl)
3179{
3180 gc_ref_update_table_values_only(tbl);
3181}
3182
3183/* Update MOVED references in a VALUE=>VALUE st_table */
3184void
3185rb_gc_update_tbl_refs(st_table *ptr)
3186{
3187 gc_update_table_refs(ptr);
3188}
3189
3190static void
3191gc_ref_update_hash(void *objspace, VALUE v)
3192{
3193 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
3194}
3195
3196static void
3197gc_update_values(void *objspace, long n, VALUE *values)
3198{
3199 for (long i = 0; i < n; i++) {
3200 UPDATE_IF_MOVED(objspace, values[i]);
3201 }
3202}
3203
3204void
3205rb_gc_update_values(long n, VALUE *values)
3206{
3207 gc_update_values(rb_gc_get_objspace(), n, values);
3208}
3209
3210static enum rb_id_table_iterator_result
3211check_id_table_move(VALUE value, void *data)
3212{
3213 void *objspace = (void *)data;
3214
3215 if (rb_gc_impl_object_moved_p(objspace, (VALUE)value)) {
3216 return ID_TABLE_REPLACE;
3217 }
3218
3219 return ID_TABLE_CONTINUE;
3220}
3221
3222void
3223rb_gc_prepare_heap_process_object(VALUE obj)
3224{
3225 switch (BUILTIN_TYPE(obj)) {
3226 case T_STRING:
3227        // Precompute the string coderange. This both saves time for when it is
3228        // eventually needed, and avoids mutating heap pages after a potential fork.
3229        rb_enc_str_coderange(obj);
3230        break;
3231 default:
3232 break;
3233 }
3234}
3235
3236void
3237rb_gc_prepare_heap(void)
3238{
3239 rb_gc_impl_prepare_heap(rb_gc_get_objspace());
3240}
3241
3242size_t
3243rb_gc_heap_id_for_size(size_t size)
3244{
3245 return rb_gc_impl_heap_id_for_size(rb_gc_get_objspace(), size);
3246}
3247
3248bool
3249rb_gc_size_allocatable_p(size_t size)
3250{
3251 return rb_gc_impl_size_allocatable_p(size);
3252}
3253
3254static enum rb_id_table_iterator_result
3255update_id_table(VALUE *value, void *data, int existing)
3256{
3257 void *objspace = (void *)data;
3258
3259 if (rb_gc_impl_object_moved_p(objspace, (VALUE)*value)) {
3260 *value = gc_location_internal(objspace, (VALUE)*value);
3261 }
3262
3263 return ID_TABLE_CONTINUE;
3264}
3265
3266static void
3267update_m_tbl(void *objspace, struct rb_id_table *tbl)
3268{
3269 if (tbl) {
3270 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
3271 }
3272}
3273
3274static enum rb_id_table_iterator_result
3275update_cc_tbl_i(VALUE ccs_ptr, void *objspace)
3276{
3277 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
3278 VM_ASSERT(vm_ccs_p(ccs));
3279
3280 if (rb_gc_impl_object_moved_p(objspace, (VALUE)ccs->cme)) {
3281 ccs->cme = (const rb_callable_method_entry_t *)gc_location_internal(objspace, (VALUE)ccs->cme);
3282 }
3283
3284 for (int i=0; i<ccs->len; i++) {
3285 if (rb_gc_impl_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
3286 ccs->entries[i].cc = (struct rb_callcache *)gc_location_internal(objspace, (VALUE)ccs->entries[i].cc);
3287 }
3288 }
3289
3290 // do not replace
3291 return ID_TABLE_CONTINUE;
3292}
3293
3294static void
3295update_cc_tbl(void *objspace, VALUE klass)
3296{
3297 struct rb_id_table *tbl = RCLASS_CC_TBL(klass);
3298 if (tbl) {
3299 rb_id_table_foreach_values(tbl, update_cc_tbl_i, objspace);
3300 }
3301}
3302
3303static enum rb_id_table_iterator_result
3304update_cvc_tbl_i(VALUE cvc_entry, void *objspace)
3305{
3306 struct rb_cvar_class_tbl_entry *entry;
3307
3308 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
3309
3310 if (entry->cref) {
3311 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, entry->cref);
3312 }
3313
3314 entry->class_value = gc_location_internal(objspace, entry->class_value);
3315
3316 return ID_TABLE_CONTINUE;
3317}
3318
3319static void
3320update_cvc_tbl(void *objspace, VALUE klass)
3321{
3322 struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
3323 if (tbl) {
3324 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
3325 }
3326}
3327
3328static enum rb_id_table_iterator_result
3329update_const_table(VALUE value, void *objspace)
3330{
3331 rb_const_entry_t *ce = (rb_const_entry_t *)value;
3332
3333 if (rb_gc_impl_object_moved_p(objspace, ce->value)) {
3334 ce->value = gc_location_internal(objspace, ce->value);
3335 }
3336
3337 if (rb_gc_impl_object_moved_p(objspace, ce->file)) {
3338 ce->file = gc_location_internal(objspace, ce->file);
3339 }
3340
3341 return ID_TABLE_CONTINUE;
3342}
3343
3344static void
3345update_const_tbl(void *objspace, struct rb_id_table *tbl)
3346{
3347 if (!tbl) return;
3348 rb_id_table_foreach_values(tbl, update_const_table, objspace);
3349}
3350
3351static void
3352update_subclass_entries(void *objspace, rb_subclass_entry_t *entry)
3353{
3354 while (entry) {
3355 UPDATE_IF_MOVED(objspace, entry->klass);
3356 entry = entry->next;
3357 }
3358}
3359
3360static void
3361update_class_ext(void *objspace, rb_classext_t *ext)
3362{
3363 UPDATE_IF_MOVED(objspace, ext->origin_);
3364 UPDATE_IF_MOVED(objspace, ext->includer);
3365 UPDATE_IF_MOVED(objspace, ext->refined_class);
3366 update_subclass_entries(objspace, ext->subclasses);
3367}
3368
3369static void
3370update_superclasses(void *objspace, VALUE obj)
3371{
3372 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
3373 for (size_t i = 0; i < RCLASS_SUPERCLASS_DEPTH(obj) + 1; i++) {
3374 UPDATE_IF_MOVED(objspace, RCLASS_SUPERCLASSES(obj)[i]);
3375 }
3376 }
3377}
3378
3379extern rb_symbols_t ruby_global_symbols;
3380#define global_symbols ruby_global_symbols
3381
3382#if USE_MODULAR_GC
3383struct global_vm_table_foreach_data {
3384 vm_table_foreach_callback_func callback;
3385 vm_table_update_callback_func update_callback;
3386 void *data;
3387};
3388
3389static int
3390vm_weak_table_foreach_key(st_data_t key, st_data_t value, st_data_t data, int error)
3391{
3392 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3393
3394 return iter_data->callback((VALUE)key, iter_data->data);
3395}
3396
3397static int
3398vm_weak_table_foreach_update_key(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3399{
3400 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3401
3402 return iter_data->update_callback((VALUE *)key, iter_data->data);
3403}
3404
3405static int
3406vm_weak_table_str_sym_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3407{
3408 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3409
3410 if (STATIC_SYM_P(value)) {
3411 return ST_CONTINUE;
3412 }
3413 else {
3414 return iter_data->callback((VALUE)value, iter_data->data);
3415 }
3416}
3417
3418static int
3419vm_weak_table_foreach_update_value(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3420{
3421 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3422
3423 return iter_data->update_callback((VALUE *)value, iter_data->data);
3424}
3425
3426static int
3427vm_weak_table_gen_ivar_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3428{
3429 int retval = vm_weak_table_foreach_key(key, value, data, error);
3430 if (retval == ST_DELETE) {
3431 FL_UNSET((VALUE)key, FL_EXIVAR);
3432 }
3433 return retval;
3434}
3435
3436static int
3437vm_weak_table_frozen_strings_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3438{
3439 GC_ASSERT(RB_TYPE_P((VALUE)key, T_STRING));
3440
3441 int retval = vm_weak_table_foreach_key(key, value, data, error);
3442 if (retval == ST_DELETE) {
3443 FL_UNSET((VALUE)key, RSTRING_FSTR);
3444 }
3445 return retval;
3446}
3447
3448struct st_table *rb_generic_ivtbl_get(void);
3449
3450void
3451rb_gc_vm_weak_table_foreach(vm_table_foreach_callback_func callback,
3452 vm_table_update_callback_func update_callback,
3453 void *data,
3454 enum rb_gc_vm_weak_tables table)
3455{
3456 rb_vm_t *vm = GET_VM();
3457
3458 struct global_vm_table_foreach_data foreach_data = {
3459 .callback = callback,
3460 .update_callback = update_callback,
3461 .data = data
3462 };
3463
3464 switch (table) {
3465 case RB_GC_VM_CI_TABLE: {
3466 st_foreach_with_replace(
3467 vm->ci_table,
3468 vm_weak_table_foreach_key,
3469 vm_weak_table_foreach_update_key,
3470 (st_data_t)&foreach_data
3471 );
3472 break;
3473 }
3474 case RB_GC_VM_OVERLOADED_CME_TABLE: {
3475 st_foreach_with_replace(
3476 vm->overloaded_cme_table,
3477 vm_weak_table_foreach_key,
3478 vm_weak_table_foreach_update_key,
3479 (st_data_t)&foreach_data
3480 );
3481 break;
3482 }
3483 case RB_GC_VM_GLOBAL_SYMBOLS_TABLE: {
3484 st_foreach_with_replace(
3485 global_symbols.str_sym,
3486 vm_weak_table_str_sym_foreach,
3487 vm_weak_table_foreach_update_value,
3488 (st_data_t)&foreach_data
3489 );
3490 break;
3491 }
3492 case RB_GC_VM_GENERIC_IV_TABLE: {
3493 st_table *generic_iv_tbl = rb_generic_ivtbl_get();
3494 st_foreach_with_replace(
3495 generic_iv_tbl,
3496 vm_weak_table_gen_ivar_foreach,
3497 vm_weak_table_foreach_update_key,
3498 (st_data_t)&foreach_data
3499 );
3500 break;
3501 }
3502 case RB_GC_VM_FROZEN_STRINGS_TABLE: {
3503 st_table *frozen_strings = GET_VM()->frozen_strings;
3504 st_foreach_with_replace(
3505 frozen_strings,
3506 vm_weak_table_frozen_strings_foreach,
3507 vm_weak_table_foreach_update_key,
3508 (st_data_t)&foreach_data
3509 );
3510 break;
3511 }
3512 default:
3513 rb_bug("rb_gc_vm_weak_table_foreach: unknown table %d", table);
3514 }
3515}
3516#endif
3517
3518void
3519rb_gc_update_vm_references(void *objspace)
3520{
3521 rb_execution_context_t *ec = GET_EC();
3522 rb_vm_t *vm = rb_ec_vm_ptr(ec);
3523
3524 rb_vm_update_references(vm);
3525 rb_gc_update_global_tbl();
3526 global_symbols.ids = gc_location_internal(objspace, global_symbols.ids);
3527 global_symbols.dsymbol_fstr_hash = gc_location_internal(objspace, global_symbols.dsymbol_fstr_hash);
3528 gc_update_table_refs(global_symbols.str_sym);
3529
3530#if USE_YJIT
3531 void rb_yjit_root_update_references(void); // in Rust
3532
3533 if (rb_yjit_enabled_p) {
3534 rb_yjit_root_update_references();
3535 }
3536#endif
3537}
3538
3539void
3540rb_gc_update_object_references(void *objspace, VALUE obj)
3541{
3542 if (FL_TEST(obj, FL_EXIVAR)) {
3543 rb_ref_update_generic_ivar(obj);
3544 }
3545
3546 switch (BUILTIN_TYPE(obj)) {
3547 case T_CLASS:
3548 if (FL_TEST(obj, FL_SINGLETON)) {
3549 UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
3550 }
3551 // Continue to the shared T_CLASS/T_MODULE
3552 case T_MODULE:
3553 if (RCLASS_SUPER((VALUE)obj)) {
3554 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
3555 }
3556 update_m_tbl(objspace, RCLASS_M_TBL(obj));
3557 update_cc_tbl(objspace, obj);
3558 update_cvc_tbl(objspace, obj);
3559 update_superclasses(objspace, obj);
3560
3561 if (rb_shape_obj_too_complex(obj)) {
3562 gc_ref_update_table_values_only(RCLASS_IV_HASH(obj));
3563 }
3564 else {
3565 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
3566 UPDATE_IF_MOVED(objspace, RCLASS_IVPTR(obj)[i]);
3567 }
3568 }
3569
3570 update_class_ext(objspace, RCLASS_EXT(obj));
3571 update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
3572
3573 UPDATE_IF_MOVED(objspace, RCLASS_EXT(obj)->classpath);
3574 break;
3575
3576 case T_ICLASS:
3577 if (RICLASS_OWNS_M_TBL_P(obj)) {
3578 update_m_tbl(objspace, RCLASS_M_TBL(obj));
3579 }
3580 if (RCLASS_SUPER((VALUE)obj)) {
3581 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
3582 }
3583 update_class_ext(objspace, RCLASS_EXT(obj));
3584 update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
3585 update_cc_tbl(objspace, obj);
3586 break;
3587
3588 case T_IMEMO:
3589 rb_imemo_mark_and_move(obj, true);
3590 return;
3591
3592 case T_NIL:
3593 case T_FIXNUM:
3594 case T_NODE:
3595 case T_MOVED:
3596 case T_NONE:
3597 /* These can't move */
3598 return;
3599
3600 case T_ARRAY:
3601 gc_ref_update_array(objspace, obj);
3602 break;
3603
3604 case T_HASH:
3605 gc_ref_update_hash(objspace, obj);
3606 UPDATE_IF_MOVED(objspace, RHASH(obj)->ifnone);
3607 break;
3608
3609 case T_STRING:
3610 {
3611 if (STR_SHARED_P(obj)) {
3612 UPDATE_IF_MOVED(objspace, RSTRING(obj)->as.heap.aux.shared);
3613 }
3614
3615            /* If, after the move, the string is not embedded and can fit in
3616             * the slot it has been placed in, then re-embed it. */
3617 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
3618 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
3619 rb_str_make_embedded(obj);
3620 }
3621 }
3622
3623 break;
3624 }
3625 case T_DATA:
3626 /* Call the compaction callback, if it exists */
3627 {
3628 void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
3629 if (ptr) {
3630 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(RTYPEDDATA(obj)->type)) {
3631 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
3632
3633 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
3634 VALUE *ref = (VALUE *)((char *)ptr + offset);
3635 *ref = gc_location_internal(objspace, *ref);
3636 }
3637 }
3638 else if (RTYPEDDATA_P(obj)) {
3639 RUBY_DATA_FUNC compact_func = RTYPEDDATA(obj)->type->function.dcompact;
3640 if (compact_func) (*compact_func)(ptr);
3641 }
3642 }
3643 }
3644 break;
3645
3646 case T_OBJECT:
3647 gc_ref_update_object(objspace, obj);
3648 break;
3649
3650 case T_FILE:
3651 if (RFILE(obj)->fptr) {
3652 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->self);
3653 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->pathv);
3654 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->tied_io_for_writing);
3655 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_asciicompat);
3656 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_pre_ecopts);
3657 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->encs.ecopts);
3658 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->write_lock);
3659 }
3660 break;
3661 case T_REGEXP:
3662 UPDATE_IF_MOVED(objspace, RREGEXP(obj)->src);
3663 break;
3664
3665 case T_SYMBOL:
3666 UPDATE_IF_MOVED(objspace, RSYMBOL(obj)->fstr);
3667 break;
3668
3669 case T_FLOAT:
3670 case T_BIGNUM:
3671 break;
3672
3673 case T_MATCH:
3674 UPDATE_IF_MOVED(objspace, RMATCH(obj)->regexp);
3675
3676 if (RMATCH(obj)->str) {
3677 UPDATE_IF_MOVED(objspace, RMATCH(obj)->str);
3678 }
3679 break;
3680
3681 case T_RATIONAL:
3682 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->num);
3683 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->den);
3684 break;
3685
3686 case T_COMPLEX:
3687 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->real);
3688 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->imag);
3689
3690 break;
3691
3692 case T_STRUCT:
3693 {
3694 long i, len = RSTRUCT_LEN(obj);
3695 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
3696
3697 for (i = 0; i < len; i++) {
3698 UPDATE_IF_MOVED(objspace, ptr[i]);
3699 }
3700 }
3701 break;
3702 default:
3703 rb_bug("unreachable");
3704 break;
3705 }
3706
3707 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
3708}
3709
3710VALUE
3711rb_gc_start(void)
3712{
3713 rb_gc();
3714 return Qnil;
3715}
3716
3717void
3718rb_gc(void)
3719{
3720 unless_objspace(objspace) { return; }
3721
3722 rb_gc_impl_start(objspace, true, true, true, false);
3723}
3724
3725int
3726rb_during_gc(void)
3727{
3728 unless_objspace(objspace) { return FALSE; }
3729
3730 return rb_gc_impl_during_gc_p(objspace);
3731}
3732
3733size_t
3734rb_gc_count(void)
3735{
3736 return rb_gc_impl_gc_count(rb_gc_get_objspace());
3737}
3738
3739static VALUE
3740gc_count(rb_execution_context_t *ec, VALUE self)
3741{
3742 return SIZET2NUM(rb_gc_count());
3743}
3744
3745VALUE
3746rb_gc_latest_gc_info(VALUE key)
3747{
3748 if (!SYMBOL_P(key) && !RB_TYPE_P(key, T_HASH)) {
3749 rb_raise(rb_eTypeError, "non-hash or symbol given");
3750 }
3751
3752 VALUE val = rb_gc_impl_latest_gc_info(rb_gc_get_objspace(), key);
3753
3754 if (val == Qundef) {
3755 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
3756 }
3757
3758 return val;
3759}
3760
3761static VALUE
3762gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
3763{
3764 if (NIL_P(arg)) {
3765 arg = rb_hash_new();
3766 }
3767 else if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
3768 rb_raise(rb_eTypeError, "non-hash or symbol given");
3769 }
3770
3771 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
3772
3773 if (ret == Qundef) {
3774 GC_ASSERT(SYMBOL_P(arg));
3775
3776 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
3777 }
3778
3779 return ret;
3780}
3781
3782size_t
3783rb_gc_stat(VALUE arg)
3784{
3785 if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
3786 rb_raise(rb_eTypeError, "non-hash or symbol given");
3787 }
3788
3789 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
3790
3791 if (ret == Qundef) {
3792 GC_ASSERT(SYMBOL_P(arg));
3793
3794 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
3795 }
3796
3797 if (SYMBOL_P(arg)) {
3798 return NUM2SIZET(ret);
3799 }
3800 else {
3801 return 0;
3802 }
3803}
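
/*
 * A sketch of the two calling conventions (key names depend on the active GC
 * implementation; :count is illustrative):
 *
 *    size_t n = rb_gc_stat(ID2SYM(rb_intern("count")));  // fetch one counter
 *
 *    VALUE h = rb_hash_new();
 *    rb_gc_stat(h);  // fill h with every stat; the size_t return is 0
 */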
3804
3805static VALUE
3806gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
3807{
3808 if (NIL_P(arg)) {
3809 arg = rb_hash_new();
3810 }
3811
3812 if (NIL_P(heap_name)) {
3813 if (!RB_TYPE_P(arg, T_HASH)) {
3814 rb_raise(rb_eTypeError, "non-hash given");
3815 }
3816 }
3817 else if (FIXNUM_P(heap_name)) {
3818 if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
3819 rb_raise(rb_eTypeError, "non-hash or symbol given");
3820 }
3821 }
3822 else {
3823 rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
3824 }
3825
3826 VALUE ret = rb_gc_impl_stat_heap(rb_gc_get_objspace(), heap_name, arg);
3827
3828 if (ret == Qundef) {
3829 GC_ASSERT(SYMBOL_P(arg));
3830
3831 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
3832 }
3833
3834 return ret;
3835}
3836
3837static VALUE
3838gc_config_get(rb_execution_context_t *ec, VALUE self)
3839{
3840 VALUE cfg_hash = rb_gc_impl_config_get(rb_gc_get_objspace());
3841 rb_hash_aset(cfg_hash, sym("implementation"), rb_fstring_cstr(rb_gc_impl_active_gc_name()));
3842
3843 return cfg_hash;
3844}
3845
3846static VALUE
3847gc_config_set(rb_execution_context_t *ec, VALUE self, VALUE hash)
3848{
3849 void *objspace = rb_gc_get_objspace();
3850
3851 rb_gc_impl_config_set(objspace, hash);
3852
3853 return rb_gc_impl_config_get(objspace);
3854}
3855
3856static VALUE
3857gc_stress_get(rb_execution_context_t *ec, VALUE self)
3858{
3859 return rb_gc_impl_stress_get(rb_gc_get_objspace());
3860}
3861
3862static VALUE
3863gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
3864{
3865 rb_gc_impl_stress_set(rb_gc_get_objspace(), flag);
3866
3867 return flag;
3868}
3869
3870void
3871rb_gc_initial_stress_set(VALUE flag)
3872{
3873 initial_stress = flag;
3874}
3875
3876size_t *
3877rb_gc_heap_sizes(void)
3878{
3879 return rb_gc_impl_heap_sizes(rb_gc_get_objspace());
3880}
3881
3882VALUE
3883rb_gc_enable(void)
3884{
3885 return rb_objspace_gc_enable(rb_gc_get_objspace());
3886}
3887
3888VALUE
3889rb_objspace_gc_enable(void *objspace)
3890{
3891 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
3892 rb_gc_impl_gc_enable(objspace);
3893 return RBOOL(disabled);
3894}
3895
3896static VALUE
3897gc_enable(rb_execution_context_t *ec, VALUE _)
3898{
3899 return rb_gc_enable();
3900}
3901
3902static VALUE
3903gc_disable_no_rest(void *objspace)
3904{
3905 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
3906 rb_gc_impl_gc_disable(objspace, false);
3907 return RBOOL(disabled);
3908}
3909
3910VALUE
3911rb_gc_disable_no_rest(void)
3912{
3913 return gc_disable_no_rest(rb_gc_get_objspace());
3914}
3915
3916VALUE
3917rb_gc_disable(void)
3918{
3919 return rb_objspace_gc_disable(rb_gc_get_objspace());
3920}
3921
3922VALUE
3923rb_objspace_gc_disable(void *objspace)
3924{
3925 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
3926 rb_gc_impl_gc_disable(objspace, true);
3927 return RBOOL(disabled);
3928}
3929
3930static VALUE
3931gc_disable(rb_execution_context_t *ec, VALUE _)
3932{
3933 return rb_gc_disable();
3934}
3935
3936// TODO: think about moving ruby_gc_set_params into Init_heap or Init_gc
3937void
3938ruby_gc_set_params(void)
3939{
3940 rb_gc_impl_set_params(rb_gc_get_objspace());
3941}
3942
3943void
3944rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
3945{
3946 RB_VM_LOCK_ENTER();
3947 {
3948        if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from() is not supported during GC");
3949
3950 if (!RB_SPECIAL_CONST_P(obj)) {
3951 rb_vm_t *vm = GET_VM();
3952 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
3953 struct gc_mark_func_data_struct mfd = {
3954 .mark_func = func,
3955 .data = data,
3956 };
3957
3958 vm->gc.mark_func_data = &mfd;
3959 rb_gc_mark_children(rb_gc_get_objspace(), obj);
3960 vm->gc.mark_func_data = prev_mfd;
3961 }
3962 }
3963 RB_VM_LOCK_LEAVE();
3964}
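
/*
 * A sketch of using this hook to enumerate an object's outgoing edges
 * (hypothetical callback): mark_func_data temporarily replaces real marking,
 * so rb_gc_mark_children() reports each child to `func` instead of marking it.
 *
 *    static void
 *    print_edge(VALUE child, void *data)
 *    {
 *        fprintf(stderr, "-> %s\n", rb_obj_classname(child));
 *    }
 *
 *    rb_objspace_reachable_objects_from(obj, print_edge, NULL);
 */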
3965
3966struct root_objects_data {
3967    const char *category;
3968 void (*func)(const char *category, VALUE, void *);
3969 void *data;
3970};
3971
3972static void
3973root_objects_from(VALUE obj, void *ptr)
3974{
3975 const struct root_objects_data *data = (struct root_objects_data *)ptr;
3976 (*data->func)(data->category, obj, data->data);
3977}
3978
3979void
3980rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
3981{
3982    if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from_root() is not supported during GC");
3983
3984 rb_vm_t *vm = GET_VM();
3985
3986 struct root_objects_data data = {
3987 .func = func,
3988 .data = passing_data,
3989 };
3990
3991 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
3992 struct gc_mark_func_data_struct mfd = {
3993 .mark_func = root_objects_from,
3994 .data = &data,
3995 };
3996
3997 vm->gc.mark_func_data = &mfd;
3998 rb_gc_save_machine_context();
3999 rb_gc_mark_roots(vm->gc.objspace, &data.category);
4000 vm->gc.mark_func_data = prev_mfd;
4001}
4002
4003/*
4004 ------------------------------ DEBUG ------------------------------
4005*/
4006
4007static const char *
4008type_name(int type, VALUE obj)
4009{
4010 switch (type) {
4011#define TYPE_NAME(t) case (t): return #t;
4012 TYPE_NAME(T_NONE);
4013 TYPE_NAME(T_OBJECT);
4014 TYPE_NAME(T_CLASS);
4015 TYPE_NAME(T_MODULE);
4016 TYPE_NAME(T_FLOAT);
4017 TYPE_NAME(T_STRING);
4018 TYPE_NAME(T_REGEXP);
4019 TYPE_NAME(T_ARRAY);
4020 TYPE_NAME(T_HASH);
4021 TYPE_NAME(T_STRUCT);
4022 TYPE_NAME(T_BIGNUM);
4023 TYPE_NAME(T_FILE);
4024 TYPE_NAME(T_MATCH);
4025 TYPE_NAME(T_COMPLEX);
4026 TYPE_NAME(T_RATIONAL);
4027 TYPE_NAME(T_NIL);
4028 TYPE_NAME(T_TRUE);
4029 TYPE_NAME(T_FALSE);
4030 TYPE_NAME(T_SYMBOL);
4031 TYPE_NAME(T_FIXNUM);
4032 TYPE_NAME(T_UNDEF);
4033 TYPE_NAME(T_IMEMO);
4034 TYPE_NAME(T_ICLASS);
4035 TYPE_NAME(T_MOVED);
4036 TYPE_NAME(T_ZOMBIE);
4037 case T_DATA:
4038 if (obj && rb_objspace_data_type_name(obj)) {
4039 return rb_objspace_data_type_name(obj);
4040 }
4041 return "T_DATA";
4042#undef TYPE_NAME
4043 }
4044 return "unknown";
4045}
4046
4047static const char *
4048obj_type_name(VALUE obj)
4049{
4050 return type_name(TYPE(obj), obj);
4051}
4052
4053const char *
4054rb_method_type_name(rb_method_type_t type)
4055{
4056 switch (type) {
4057 case VM_METHOD_TYPE_ISEQ: return "iseq";
4058      case VM_METHOD_TYPE_ATTRSET: return "attrset";
4059 case VM_METHOD_TYPE_IVAR: return "ivar";
4060 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
4061 case VM_METHOD_TYPE_ALIAS: return "alias";
4062 case VM_METHOD_TYPE_REFINED: return "refined";
4063 case VM_METHOD_TYPE_CFUNC: return "cfunc";
4064 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
4065 case VM_METHOD_TYPE_MISSING: return "missing";
4066 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
4067 case VM_METHOD_TYPE_UNDEF: return "undef";
4068 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
4069 }
4070 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
4071}
4072
4073static void
4074rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
4075{
4076 if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
4077 VALUE path = rb_iseq_path(iseq);
4078 int n = ISEQ_BODY(iseq)->location.first_lineno;
4079 snprintf(buff, buff_size, " %s@%s:%d",
4080 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
4081 RSTRING_PTR(path), n);
4082 }
4083}
4084
4085static int
4086str_len_no_raise(VALUE str)
4087{
4088 long len = RSTRING_LEN(str);
4089 if (len < 0) return 0;
4090 if (len > INT_MAX) return INT_MAX;
4091 return (int)len;
4092}
4093
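/* The macros below implement truncation-safe appends for the obj_info
 * writers: each advances a local `pos` cursor within `buff` and jumps to the
 * enclosing function's `end:` label once `buff_size` is exhausted, so output
 * is silently truncated rather than overflowed. */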
4094#define BUFF_ARGS buff + pos, buff_size - pos
4095#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
4096#define APPEND_S(s) do { \
4097 if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
4098 goto end; \
4099 } \
4100 else { \
4101 memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
4102 } \
4103 } while (0)
4104#define C(c, s) ((c) != 0 ? (s) : " ")
4105
4106static size_t
4107rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
4108{
4109 size_t pos = 0;
4110
4111 if (SPECIAL_CONST_P(obj)) {
4112 APPEND_F("%s", obj_type_name(obj));
4113
4114 if (FIXNUM_P(obj)) {
4115 APPEND_F(" %ld", FIX2LONG(obj));
4116 }
4117 else if (SYMBOL_P(obj)) {
4118 APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
4119 }
4120 }
4121 else {
4122 // const int age = RVALUE_AGE_GET(obj);
4123
4124 if (rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj)) {
4125 // TODO: fixme
4126 // APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
4127 // (void *)obj, age,
4128 // C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
4129 // C(RVALUE_MARK_BITMAP(obj), "M"),
4130 // C(RVALUE_PIN_BITMAP(obj), "P"),
4131 // C(RVALUE_MARKING_BITMAP(obj), "R"),
4132 // C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
4133 // C(rb_objspace_garbage_object_p(obj), "G"),
4134 // obj_type_name(obj));
4135 }
4136 else {
4137 /* fake */
4138 // APPEND_F("%p [%dXXXX] %s",
4139 // (void *)obj, age,
4140 // obj_type_name(obj));
4141 }
4142
4143 if (internal_object_p(obj)) {
4144 /* ignore */
4145 }
4146 else if (RBASIC(obj)->klass == 0) {
4147 APPEND_S("(temporary internal)");
4148 }
4149 else if (RTEST(RBASIC(obj)->klass)) {
4150 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
4151 if (!NIL_P(class_path)) {
4152 APPEND_F("(%s)", RSTRING_PTR(class_path));
4153 }
4154 }
4155 }
4156 end:
4157
4158 return pos;
4159}
4160
4161const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);
4162
4163static size_t
4164rb_raw_obj_info_builtin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
4165{
4166 if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
4167 const enum ruby_value_type type = BUILTIN_TYPE(obj);
4168
4169 switch (type) {
4170 case T_NODE:
4171 UNEXPECTED_NODE(rb_raw_obj_info);
4172 break;
4173 case T_ARRAY:
4174 if (ARY_SHARED_P(obj)) {
4175 APPEND_S("shared -> ");
4176 rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
4177 }
4178 else if (ARY_EMBED_P(obj)) {
4179 APPEND_F("[%s%s] len: %ld (embed)",
4180 C(ARY_EMBED_P(obj), "E"),
4181 C(ARY_SHARED_P(obj), "S"),
4182 RARRAY_LEN(obj));
4183 }
4184 else {
4185 APPEND_F("[%s%s] len: %ld, capa:%ld ptr:%p",
4186 C(ARY_EMBED_P(obj), "E"),
4187 C(ARY_SHARED_P(obj), "S"),
4188 RARRAY_LEN(obj),
4189 ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
4190 (void *)RARRAY_CONST_PTR(obj));
4191 }
4192 break;
4193 case T_STRING: {
4194 if (STR_SHARED_P(obj)) {
4195 APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
4196 }
4197 else {
4198 if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
4199
4200 APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
4201 }
4202 APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
4203 break;
4204 }
4205 case T_SYMBOL: {
4206 VALUE fstr = RSYMBOL(obj)->fstr;
4207 ID id = RSYMBOL(obj)->id;
4208 if (RB_TYPE_P(fstr, T_STRING)) {
4209 APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
4210 }
4211 else {
4212 APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
4213 }
4214 break;
4215 }
4216 case T_MOVED: {
4217 APPEND_F("-> %p", (void*)gc_location_internal(rb_gc_get_objspace(), obj));
4218 break;
4219 }
4220 case T_HASH: {
4221 APPEND_F("[%c] %"PRIdSIZE,
4222 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
4223 RHASH_SIZE(obj));
4224 break;
4225 }
4226 case T_CLASS:
4227 case T_MODULE:
4228 {
4229 VALUE class_path = rb_class_path_cached(obj);
4230 if (!NIL_P(class_path)) {
4231 APPEND_F("%s", RSTRING_PTR(class_path));
4232 }
4233 else {
4234 APPEND_S("(anon)");
4235 }
4236 break;
4237 }
4238 case T_ICLASS:
4239 {
4240 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
4241 if (!NIL_P(class_path)) {
4242 APPEND_F("src:%s", RSTRING_PTR(class_path));
4243 }
4244 break;
4245 }
4246 case T_OBJECT:
4247 {
4248 if (rb_shape_obj_too_complex(obj)) {
4249 size_t hash_len = rb_st_table_size(ROBJECT_IV_HASH(obj));
4250 APPEND_F("(too_complex) len:%zu", hash_len);
4251 }
4252 else {
4253 uint32_t len = ROBJECT_IV_CAPACITY(obj);
4254
4255 if (RBASIC(obj)->flags & ROBJECT_EMBED) {
4256 APPEND_F("(embed) len:%d", len);
4257 }
4258 else {
4259 VALUE *ptr = ROBJECT_IVPTR(obj);
4260 APPEND_F("len:%d ptr:%p", len, (void *)ptr);
4261 }
4262 }
4263 }
4264 break;
4265 case T_DATA: {
4266 const struct rb_block *block;
4267 const rb_iseq_t *iseq;
4268 if (rb_obj_is_proc(obj) &&
4269 (block = vm_proc_block(obj)) != NULL &&
4270 (vm_block_type(block) == block_type_iseq) &&
4271 (iseq = vm_block_iseq(block)) != NULL) {
4272 rb_raw_iseq_info(BUFF_ARGS, iseq);
4273 }
4274 else if (rb_ractor_p(obj)) {
4275 rb_ractor_t *r = (void *)DATA_PTR(obj);
4276 if (r) {
4277 APPEND_F("r:%d", r->pub.id);
4278 }
4279 }
4280 else {
4281 const char * const type_name = rb_objspace_data_type_name(obj);
4282 if (type_name) {
4283 APPEND_F("%s", type_name);
4284 }
4285 }
4286 break;
4287 }
4288 case T_IMEMO: {
4289 APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
4290
4291 switch (imemo_type(obj)) {
4292 case imemo_ment:
4293 {
4294 const rb_method_entry_t *me = (const rb_method_entry_t *)obj;
4295
4296 APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
4297 rb_id2name(me->called_id),
4298 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
4299 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
4300 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
4301 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
4302 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
4303 me->def ? rb_method_type_name(me->def->type) : "NULL",
4304 me->def ? me->def->aliased : -1,
4305 (void *)me->owner, // obj_info(me->owner),
4306 (void *)me->defined_class); //obj_info(me->defined_class)));
4307
4308 if (me->def) {
4309 switch (me->def->type) {
4310 case VM_METHOD_TYPE_ISEQ:
4311 APPEND_S(" (iseq:");
4312 rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
4313 APPEND_S(")");
4314 break;
4315 default:
4316 break;
4317 }
4318 }
4319
4320 break;
4321 }
4322 case imemo_iseq: {
4323 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
4324 rb_raw_iseq_info(BUFF_ARGS, iseq);
4325 break;
4326 }
4327 case imemo_callinfo:
4328 {
4329 const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
4330 APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
4331 rb_id2name(vm_ci_mid(ci)),
4332 vm_ci_flag(ci),
4333 vm_ci_argc(ci),
4334 vm_ci_kwarg(ci) ? "available" : "NULL");
4335 break;
4336 }
4337 case imemo_callcache:
4338 {
4339 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
4340 VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
4341 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4342
4343 APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
4344 NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
4345 cme ? rb_id2name(cme->called_id) : "<NULL>",
4346 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
4347 (void *)cme,
4348 (void *)(uintptr_t)vm_cc_call(cc));
4349 break;
4350 }
4351 default:
4352 break;
4353 }
4354 }
4355 default:
4356 break;
4357 }
4358 }
4359 end:
4360
4361 return pos;
4362}
4363
4364#undef C
4365
4366void
4367rb_asan_poison_object(VALUE obj)
4368{
4369 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4370 asan_poison_memory_region(ptr, rb_gc_obj_slot_size(obj));
4371}
4372
4373void
4374rb_asan_unpoison_object(VALUE obj, bool newobj_p)
4375{
4376 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4377 asan_unpoison_memory_region(ptr, rb_gc_obj_slot_size(obj), newobj_p);
4378}
4379
4380void *
4381rb_asan_poisoned_object_p(VALUE obj)
4382{
4383 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4384 return __asan_region_is_poisoned(ptr, rb_gc_obj_slot_size(obj));
4385}
4386
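/* The macro below temporarily unpoisons `obj` for exactly one pass of the
 * loop body: the init clause saves the previous poison state, and the
 * increment clause restores it via asan_poison_object_restore(), whose NULL
 * return (per the "flag to loop just once" note) ends the loop after a
 * single iteration. */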
4387#define asan_unpoisoning_object(obj) \
4388 for (void *poisoned = asan_unpoison_object_temporary(obj), \
4389 *unpoisoning = &poisoned; /* flag to loop just once */ \
4390 unpoisoning; \
4391 unpoisoning = asan_poison_object_restore(obj, poisoned))
4392
4393const char *
4394rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
4395{
4396 asan_unpoisoning_object(obj) {
4397 size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
4398        pos = rb_raw_obj_info_builtin_type(buff, buff_size, obj, pos);
4399 if (pos >= buff_size) {} // truncated
4400 }
4401
4402 return buff;
4403}
4404
4405#undef APPEND_S
4406#undef APPEND_F
4407#undef BUFF_ARGS
4408
4409#if RGENGC_OBJ_INFO
4410#define OBJ_INFO_BUFFERS_NUM 10
4411#define OBJ_INFO_BUFFERS_SIZE 0x100
4412static rb_atomic_t obj_info_buffers_index = 0;
4413static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
4414
4415/* Atomically increments *var and resets it to 0 once maxval is reached.
4416 * Returns the old (pre-increment) *var value, wrapped into 0...maxval. */
4417static rb_atomic_t
4418atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
4419{
4420 rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
4421 if (RB_UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
4422 const rb_atomic_t newval = oldval + 1;
4423 RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
4424 oldval %= maxval;
4425 }
4426 return oldval;
4427}
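/* Worked example (illustrative): with maxval == 10 and *var == 9, the
 * fetch-add bumps *var to 10 and returns oldval == 9; since 9 >= 10 - 1,
 * the CAS swaps 10 back to 10 % 10 == 0, and the caller gets 9 % 10 == 9.
 * If a racing increment changed *var in between, the CAS simply fails and a
 * later caller whose newval matches performs the wrap instead. */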
4428
4429static const char *
4430obj_info(VALUE obj)
4431{
4432 rb_atomic_t index = atomic_inc_wraparound(&obj_info_buffers_index, OBJ_INFO_BUFFERS_NUM);
4433 char *const buff = obj_info_buffers[index];
4434 return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
4435}
4436#else
4437static const char *
4438obj_info(VALUE obj)
4439{
4440 return obj_type_name(obj);
4441}
4442#endif
4443
4444/*
4445 ------------------------ Extended allocator ------------------------
4446*/
4447
4448struct gc_raise_tag {
4449    VALUE exc;
4449 VALUE exc;
4450 const char *fmt;
4451 va_list *ap;
4452};
4453
4454static void *
4455gc_vraise(void *ptr)
4456{
4457 struct gc_raise_tag *argv = ptr;
4458 rb_vraise(argv->exc, argv->fmt, *argv->ap);
4459 UNREACHABLE_RETURN(NULL);
4460}
4461
4462static void
4463gc_raise(VALUE exc, const char *fmt, ...)
4464{
4465 va_list ap;
4466 va_start(ap, fmt);
4467 struct gc_raise_tag argv = {
4468 exc, fmt, &ap,
4469 };
4470
4471 if (ruby_thread_has_gvl_p()) {
4472        gc_vraise(&argv);
4473        UNREACHABLE;
4474    }
4475 else if (ruby_native_thread_p()) {
4476        rb_thread_call_with_gvl(gc_vraise, &argv);
4477        UNREACHABLE;
4478    }
4479 else {
4480 /* Not in a ruby thread */
4481 fprintf(stderr, "%s", "[FATAL] ");
4482 vfprintf(stderr, fmt, ap);
4483 }
4484
4485 va_end(ap);
4486 abort();
4487}
4488
4489NORETURN(static void negative_size_allocation_error(const char *));
4490static void
4491negative_size_allocation_error(const char *msg)
4492{
4493 gc_raise(rb_eNoMemError, "%s", msg);
4494}
4495
4496static void *
4497ruby_memerror_body(void *dummy)
4498{
4499 rb_memerror();
4500 return 0;
4501}
4502
4503NORETURN(static void ruby_memerror(void));
4505static void
4506ruby_memerror(void)
4507{
4508 if (ruby_thread_has_gvl_p()) {
4509 rb_memerror();
4510 }
4511 else {
4512 if (ruby_native_thread_p()) {
4513 rb_thread_call_with_gvl(ruby_memerror_body, 0);
4514 }
4515 else {
4516 /* no ruby thread */
4517 fprintf(stderr, "[FATAL] failed to allocate memory\n");
4518 }
4519 }
4520
4521    /* There have been discussions about whether we should die here; */
4522    /* we may rethink this later. */
4523 exit(EXIT_FAILURE);
4524}
4525
4526void
4527rb_memerror(void)
4528{
4529 /* the `GET_VM()->special_exceptions` below assumes that
4530 * the VM is reachable from the current thread. We should
4531 * definitely make sure of that. */
4532 RUBY_ASSERT_ALWAYS(ruby_thread_has_gvl_p());
4533
4534 rb_execution_context_t *ec = GET_EC();
4535 VALUE exc = GET_VM()->special_exceptions[ruby_error_nomemory];
4536
4537 if (!exc ||
4538 rb_ec_raised_p(ec, RAISED_NOMEMORY) ||
4539 rb_ec_vm_lock_rec(ec) != ec->tag->lock_rec) {
4540 fprintf(stderr, "[FATAL] failed to allocate memory\n");
4541 exit(EXIT_FAILURE);
4542 }
4543 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
4544 rb_ec_raised_clear(ec);
4545 }
4546 else {
4547 rb_ec_raised_set(ec, RAISED_NOMEMORY);
4548 exc = ruby_vm_special_exception_copy(exc);
4549 }
4550 ec->errinfo = exc;
4551 EC_JUMP_TAG(ec, TAG_RAISE);
4552}
4553
4554bool
4555rb_memerror_reentered(void)
4556{
4557 rb_execution_context_t *ec = GET_EC();
4558 return (ec && rb_ec_raised_p(ec, RAISED_NOMEMORY));
4559}
4560
4561void
4562rb_malloc_info_show_results(void)
4563{
4564}
4565
4566static void *
4567handle_malloc_failure(void *ptr)
4568{
4569 if (LIKELY(ptr)) {
4570 return ptr;
4571 }
4572 else {
4573 ruby_memerror();
4574 UNREACHABLE_RETURN(ptr);
4575 }
4576}
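/* Each allocator below is split into a public wrapper plus a *_body helper:
 * the helper returns NULL on failure, and the wrapper funnels that NULL
 * through handle_malloc_failure() above, which raises NoMemoryError via
 * ruby_memerror() (acquiring the GVL first when called without it). */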
4577
4578static void *ruby_xmalloc_body(size_t size);
4579
4580void *
4581ruby_xmalloc(size_t size)
4582{
4583 return handle_malloc_failure(ruby_xmalloc_body(size));
4584}
4585
4586static void *
4587ruby_xmalloc_body(size_t size)
4588{
4589 if ((ssize_t)size < 0) {
4590 negative_size_allocation_error("too large allocation size");
4591 }
4592
4593 return rb_gc_impl_malloc(rb_gc_get_objspace(), size);
4594}
4595
4596void
4597ruby_malloc_size_overflow(size_t count, size_t elsize)
4598{
4599 rb_raise(rb_eArgError,
4600 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
4601 count, elsize);
4602}
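/* Illustrative sketch: the count * elsize multiply is overflow-checked
 * (xmalloc2_size(), used below, wraps that check) before any allocation
 * happens, so a request like this raises ArgError instead of silently
 * wrapping: */
#if 0
void *p = ruby_xmalloc2(SIZE_MAX / 2, 4); /* ~2 * SIZE_MAX bytes: raises */
#endif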
4603
4604static void *ruby_xmalloc2_body(size_t n, size_t size);
4605
4606void *
4607ruby_xmalloc2(size_t n, size_t size)
4608{
4609 return handle_malloc_failure(ruby_xmalloc2_body(n, size));
4610}
4611
4612static void *
4613ruby_xmalloc2_body(size_t n, size_t size)
4614{
4615 return rb_gc_impl_malloc(rb_gc_get_objspace(), xmalloc2_size(n, size));
4616}
4617
4618static void *ruby_xcalloc_body(size_t n, size_t size);
4619
4620void *
4621ruby_xcalloc(size_t n, size_t size)
4622{
4623 return handle_malloc_failure(ruby_xcalloc_body(n, size));
4624}
4625
4626static void *
4627ruby_xcalloc_body(size_t n, size_t size)
4628{
4629 return rb_gc_impl_calloc(rb_gc_get_objspace(), xmalloc2_size(n, size));
4630}
4631
4632static void *ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size);
4633
4634#ifdef ruby_sized_xrealloc
4635#undef ruby_sized_xrealloc
4636#endif
4637void *
4638ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
4639{
4640 return handle_malloc_failure(ruby_sized_xrealloc_body(ptr, new_size, old_size));
4641}
4642
4643static void *
4644ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size)
4645{
4646 if ((ssize_t)new_size < 0) {
4647 negative_size_allocation_error("too large allocation size");
4648 }
4649
4650 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, new_size, old_size);
4651}
4652
4653void *
4654ruby_xrealloc(void *ptr, size_t new_size)
4655{
4656 return ruby_sized_xrealloc(ptr, new_size, 0);
4657}
4658
4659static void *ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n);
4660
4661#ifdef ruby_sized_xrealloc2
4662#undef ruby_sized_xrealloc2
4663#endif
4664void *
4665ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
4666{
4667 return handle_malloc_failure(ruby_sized_xrealloc2_body(ptr, n, size, old_n));
4668}
4669
4670static void *
4671ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n)
4672{
4673 size_t len = xmalloc2_size(n, size);
4674 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, len, old_n * size);
4675}
4676
4677void *
4678ruby_xrealloc2(void *ptr, size_t n, size_t size)
4679{
4680 return ruby_sized_xrealloc2(ptr, n, size, 0);
4681}
4682
4683#ifdef ruby_sized_xfree
4684#undef ruby_sized_xfree
4685#endif
4686void
4687ruby_sized_xfree(void *x, size_t size)
4688{
4689 if (LIKELY(x)) {
4690 /* It's possible for a C extension's pthread destructor function set by pthread_key_create
4691 * to be called after ruby_vm_destruct and attempt to free memory. Fall back to mimfree in
4692 * that case. */
4693 if (LIKELY(GET_VM())) {
4694 rb_gc_impl_free(rb_gc_get_objspace(), x, size);
4695 }
4696 else {
4697 ruby_mimfree(x);
4698 }
4699 }
4700}
4701
4702void
4703ruby_xfree(void *x)
4704{
4705 ruby_sized_xfree(x, 0);
4706}
4707
4708void *
4709rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
4710{
4711 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
4712 return ruby_xmalloc(w);
4713}
4714
4715void *
4716rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
4717{
4718 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
4719 return ruby_xcalloc(w, 1);
4720}
4721
4722void *
4723rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
4724{
4725 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
4726 return ruby_xrealloc((void *)p, w);
4727}
4728
4729void *
4730rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
4731{
4732 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
4733 return ruby_xmalloc(u);
4734}
4735
4736void *
4737rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
4738{
4739 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
4740 return ruby_xcalloc(u, 1);
4741}
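/* Usage sketch (hypothetical names): the mul_add helpers fit "header plus
 * flexible array" layouts, where the element count is untrusted and the
 * x * y + z size computation must not wrap. */
#if 0
struct my_table {
    size_t len;
    VALUE entries[];    /* flexible array member */
};

static struct my_table *
my_table_new(size_t len)
{
    /* len * sizeof(VALUE) + sizeof(struct my_table), overflow-checked */
    struct my_table *t = rb_xmalloc_mul_add(len, sizeof(VALUE),
                                            sizeof(struct my_table));
    t->len = len;
    return t;
}
#endif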
4742
4743/* Mimics ruby_xmalloc, but does not require rb_objspace.
4744 * Returns a pointer suitable for ruby_xfree.
4745 */
4746void *
4747ruby_mimmalloc(size_t size)
4748{
4749 void *mem;
4750#if CALC_EXACT_MALLOC_SIZE
4751 size += sizeof(struct malloc_obj_info);
4752#endif
4753 mem = malloc(size);
4754#if CALC_EXACT_MALLOC_SIZE
4755 if (!mem) {
4756 return NULL;
4757 }
4758 else
4759 /* set 0 for consistency of allocated_size/allocations */
4760 {
4761 struct malloc_obj_info *info = mem;
4762 info->size = 0;
4763 mem = info + 1;
4764 }
4765#endif
4766 return mem;
4767}
4768
4769void *
4770ruby_mimcalloc(size_t num, size_t size)
4771{
4772 void *mem;
4773#if CALC_EXACT_MALLOC_SIZE
4774 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(num, size);
4775 if (UNLIKELY(t.left)) {
4776 return NULL;
4777 }
4778 size = t.right + sizeof(struct malloc_obj_info);
4779 mem = calloc1(size);
4780 if (!mem) {
4781 return NULL;
4782 }
4783 else
4784 /* set 0 for consistency of allocated_size/allocations */
4785 {
4786 struct malloc_obj_info *info = mem;
4787 info->size = 0;
4788 mem = info + 1;
4789 }
4790#else
4791 mem = calloc(num, size);
4792#endif
4793 return mem;
4794}
4795
4796void
4797ruby_mimfree(void *ptr)
4798{
4799#if CALC_EXACT_MALLOC_SIZE
4800 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
4801 ptr = info;
4802#endif
4803 free(ptr);
4804}
4805
4806void
4807rb_gc_adjust_memory_usage(ssize_t diff)
4808{
4809 unless_objspace(objspace) { return; }
4810
4811 rb_gc_impl_adjust_memory_usage(objspace, diff);
4812}
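/* Usage sketch (hypothetical extension code): when an object owns memory the
 * GC cannot see, e.g. a buffer allocated by a third-party library, reporting
 * the delta keeps malloc-triggered GC heuristics calibrated. */
#if 0
struct my_wrapper { void *buf; size_t size; };   /* hypothetical */

static void
my_wrapper_attach(struct my_wrapper *w, void *third_party_buf, size_t size)
{
    w->buf = third_party_buf;
    w->size = size;
    rb_gc_adjust_memory_usage((ssize_t)size);    /* report the growth */
}

static void
my_wrapper_detach(struct my_wrapper *w)
{
    third_party_free(w->buf);                    /* hypothetical free fn */
    rb_gc_adjust_memory_usage(-(ssize_t)w->size);
    w->buf = NULL;
    w->size = 0;
}
#endif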
4813
4814const char *
4815rb_obj_info(VALUE obj)
4816{
4817 return obj_info(obj);
4818}
4819
4820void
4821rb_obj_info_dump(VALUE obj)
4822{
4823 char buff[0x100];
4824 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
4825}
4826
4827void
4828rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
4829{
4830 char buff[0x100];
4831 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
4832}
4833
4834void
4835rb_gc_before_fork(void)
4836{
4837 rb_gc_impl_before_fork(rb_gc_get_objspace());
4838}
4839
4840void
4841rb_gc_after_fork(rb_pid_t pid)
4842{
4843 rb_gc_impl_after_fork(rb_gc_get_objspace(), pid);
4844}
4845
4846/*
4847 * Document-module: ObjectSpace
4848 *
4849 * The ObjectSpace module contains a number of routines
4850 * that interact with the garbage collection facility and allow you to
4851 * traverse all living objects with an iterator.
4852 *
4853 * ObjectSpace also provides support for object finalizers, procs that will be
4854 * called after a specific object was destroyed by garbage collection. See
4855 * the documentation for +ObjectSpace.define_finalizer+ for important
4856 * information on how to use this method correctly.
4857 *
4858 * a = "A"
4859 * b = "B"
4860 *
4861 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
4862 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
4863 *
4864 * a = nil
4865 * b = nil
4866 *
4867 * _produces:_
4868 *
4869 * Finalizer two on 537763470
4870 * Finalizer one on 537763480
4871 */
4872
4873/* Document-class: GC::Profiler
4874 *
4875 * The GC profiler provides access to information on GC runs including time,
4876 * length and object space size.
4877 *
4878 * Example:
4879 *
4880 * GC::Profiler.enable
4881 *
4882 * require 'rdoc/rdoc'
4883 *
4884 * GC::Profiler.report
4885 *
4886 * GC::Profiler.disable
4887 *
4888 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
4889 */
4890
4891#include "gc.rbinc"
4892
4893void
4894Init_GC(void)
4895{
4896#undef rb_intern
4897 malloc_offset = gc_compute_malloc_offset();
4898
4899 rb_mGC = rb_define_module("GC");
4900
4901 VALUE rb_mObjSpace = rb_define_module("ObjectSpace");
4902
4903 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
4904
4905 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
4906 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
4907
4908 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
4909
4910 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
4911
4912 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
4913 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
4914
4915 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
4916
4917 rb_gc_impl_init();
4918}
4919
4920// Set a name for the anonymous virtual memory area. `addr` is the starting
4921// address of the area and `size` is its length in bytes. `name` is a
4922// NUL-terminated human-readable string.
4923//
4924// This function is usually called after calling `mmap()`. The human-readable
4925// annotation helps developers identify the call site of `mmap()` that created
4926// the memory mapping.
4927//
4928// This function currently only works on Linux 5.17 or higher. After calling
4929// this function, we can see annotations in the form of "[anon:...]" in
4930// `/proc/self/maps`, where `...` is the content of `name`. This function has
4931// no effect when called on other platforms.
4932void
4933ruby_annotate_mmap(const void *addr, unsigned long size, const char *name)
4934{
4935#if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
4936 // The name length cannot exceed 80 (including the '\0').
4937 RUBY_ASSERT(strlen(name) < 80);
4938 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)addr, size, name);
4939 // We ignore errors in prctl. prctl may set errno to EINVAL for several
4940 // reasons.
4941 // 1. The attr (PR_SET_VMA_ANON_NAME) is not a valid attribute.
4942 // 2. addr is an invalid address.
4943 // 3. The string pointed by name is too long.
4944 // The first error indicates PR_SET_VMA_ANON_NAME is not available, and may
4945 // happen if we run the compiled binary on an old kernel. In theory, all
4946 // other errors should result in a failure. But since EINVAL cannot tell
4947 // the first error from others, and this function is mainly used for
4948 // debugging, we silently ignore the error.
4949 errno = 0;
4950#endif
4951}
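/* Usage sketch (hypothetical): naming a fresh anonymous mapping so it shows
 * up as "[anon:Ruby:my_arena]" in /proc/self/maps on Linux 5.17+. */
#if 0
#include <sys/mman.h>

static void *
alloc_named_arena(size_t size)
{
    void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (mem != MAP_FAILED) {
        ruby_annotate_mmap(mem, size, "Ruby:my_arena");
    }
    return mem;
}
#endif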