Ruby 3.4.5p51 (2025-07-16 revision 20cda200d3ce092571d0b5d342dadca69636cb0f)
imemo.c
#include "constant.h"
#include "id_table.h"
#include "internal.h"
#include "internal/imemo.h"
#include "vm_callinfo.h"

size_t rb_iseq_memsize(const rb_iseq_t *iseq);
void rb_iseq_mark_and_move(rb_iseq_t *iseq, bool reference_updating);
void rb_iseq_free(const rb_iseq_t *iseq);

const char *
rb_imemo_name(enum imemo_type type)
{
    // put no default case to get a warning if an imemo type is missing
    switch (type) {
#define IMEMO_NAME(x) case imemo_##x: return #x;
        IMEMO_NAME(ast);
        IMEMO_NAME(callcache);
        IMEMO_NAME(callinfo);
        IMEMO_NAME(constcache);
        IMEMO_NAME(cref);
        IMEMO_NAME(env);
        IMEMO_NAME(ifunc);
        IMEMO_NAME(iseq);
        IMEMO_NAME(memo);
        IMEMO_NAME(ment);
        IMEMO_NAME(parser_strterm);
        IMEMO_NAME(svar);
        IMEMO_NAME(throw_data);
        IMEMO_NAME(tmpbuf);
#undef IMEMO_NAME
      default:
        rb_bug("unreachable");
    }
}
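/*
 * Illustrative use (hypothetical snippet, not part of imemo.c):
 * rb_imemo_name() pairs naturally with imemo_type() when printing debug
 * information about an arbitrary imemo object `obj`:
 *
 *     fprintf(stderr, "imemo %p is a %s\n",
 *             (void *)obj, rb_imemo_name(imemo_type(obj)));
 */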

/* =========================================================================
 * allocation
 * ========================================================================= */

VALUE
rb_imemo_new(enum imemo_type type, VALUE v0)
{
    size_t size = RVALUE_SIZE;
    VALUE flags = T_IMEMO | (type << FL_USHIFT) | FL_WB_PROTECTED;
    NEWOBJ_OF(obj, void, v0, flags, size, 0);

    return (VALUE)obj;
}
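/*
 * Minimal sketch (assuming the flag layout declared in internal/imemo.h):
 * the type stored above in the FL_USHIFT bits is recovered later by
 * imemo_type(), roughly:
 *
 *     // (RBASIC(obj)->flags >> FL_USHIFT) & IMEMO_MASK
 *     enum imemo_type t = imemo_type(obj);
 */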

static rb_imemo_tmpbuf_t *
rb_imemo_tmpbuf_new(void)
{
    size_t size = sizeof(struct rb_imemo_tmpbuf_struct);
    VALUE flags = T_IMEMO | (imemo_tmpbuf << FL_USHIFT);
    NEWOBJ_OF(obj, struct rb_imemo_tmpbuf_struct, 0, flags, size, 0);

    return obj;
}

void *
rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
{
    void *ptr;
    rb_imemo_tmpbuf_t *tmpbuf;

    /* Keep this order: allocate the empty imemo first, then xmalloc. If the
     * imemo allocation ran after xmalloc and triggered GC or raised, the
     * malloc'ed memory would leak. */
    tmpbuf = rb_imemo_tmpbuf_new();
    *store = (VALUE)tmpbuf;
    ptr = ruby_xmalloc(size);
    tmpbuf->ptr = ptr;
    tmpbuf->cnt = cnt;

    return ptr;
}

void *
rb_alloc_tmp_buffer(volatile VALUE *store, long len)
{
    long cnt;

    if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
        rb_raise(rb_eArgError, "negative buffer size (or size too big)");
    }

    return rb_alloc_tmp_buffer_with_count(store, len, cnt);
}

void
rb_free_tmp_buffer(volatile VALUE *store)
{
    rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t *)ATOMIC_VALUE_EXCHANGE(*store, 0);
    if (s) {
        void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
        s->cnt = 0;
        ruby_xfree(ptr);
    }
}
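/*
 * Illustrative caller pattern (hypothetical names `n` and `vec`): the
 * `store` argument is a stack-resident VALUE slot that keeps the
 * imemo_tmpbuf, and therefore the xmalloc'ed memory, visible to the GC.
 * The public ALLOCV()/ALLOCV_N() macros are thin wrappers over these
 * helpers.
 *
 *     volatile VALUE store = 0;
 *     long *vec = rb_alloc_tmp_buffer(&store, n * sizeof(long));
 *     // ... use vec ...
 *     rb_free_tmp_buffer(&store);
 */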

rb_imemo_tmpbuf_t *
rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
{
    rb_imemo_tmpbuf_t *tmpbuf = rb_imemo_tmpbuf_new();
    tmpbuf->ptr = buf;
    tmpbuf->next = old_heap;
    tmpbuf->cnt = cnt;

    return tmpbuf;
}
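/*
 * Note: unlike the standalone tmp buffers above, the parser heap chains its
 * tmpbufs through `next`, so marking the newest node walks the whole list
 * (see the do/while loop in the imemo_tmpbuf case of
 * rb_imemo_mark_and_move() below).
 */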

#if IMEMO_DEBUG
VALUE
rb_imemo_new_debug(enum imemo_type type, VALUE v0, const char *file, int line)
{
    VALUE memo = rb_imemo_new(type, v0);
    fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
    return memo;
}
#endif

/* =========================================================================
 * memsize
 * ========================================================================= */

size_t
rb_imemo_memsize(VALUE obj)
{
    size_t size = 0;
    switch (imemo_type(obj)) {
      case imemo_ast:
        rb_bug("imemo_ast is obsolete");

        break;
      case imemo_callcache:
        break;
      case imemo_callinfo:
        break;
      case imemo_constcache:
        break;
      case imemo_cref:
        break;
      case imemo_env:
        size += ((rb_env_t *)obj)->env_size * sizeof(VALUE);

        break;
      case imemo_ifunc:
        break;
      case imemo_iseq:
        size += rb_iseq_memsize((rb_iseq_t *)obj);

        break;
      case imemo_memo:
        break;
      case imemo_ment:
        size += sizeof(((rb_method_entry_t *)obj)->def);

        break;
      case imemo_parser_strterm:
        break;
      case imemo_svar:
        break;
      case imemo_throw_data:
        break;
      case imemo_tmpbuf:
        size += ((rb_imemo_tmpbuf_t *)obj)->cnt * sizeof(VALUE);

        break;
      default:
        rb_bug("unreachable");
    }

    return size;
}
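/*
 * Note: rb_imemo_memsize() counts only memory directly owned by the imemo
 * itself (env slots, iseq bodies, tmpbuf storage); Ruby objects it merely
 * references are accounted for by their own slots.
 */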

/* =========================================================================
 * mark
 * ========================================================================= */

static enum rb_id_table_iterator_result
cc_table_mark_i(VALUE ccs_ptr, void *data)
{
    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
    VM_ASSERT(vm_ccs_p(ccs));
#if VM_CHECK_MODE > 0
    VALUE klass = (VALUE)data;

    VALUE lookup_val;
    VM_ASSERT(rb_id_table_lookup(RCLASS_CC_TBL(klass), ccs->cme->called_id, &lookup_val));
    VM_ASSERT(lookup_val == ccs_ptr);
#endif

    if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
        rb_vm_ccs_free(ccs);
        return ID_TABLE_DELETE;
    }
    else {
        rb_gc_mark_movable((VALUE)ccs->cme);

        for (int i=0; i<ccs->len; i++) {
            VM_ASSERT(klass == ccs->entries[i].cc->klass);
            VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));

            rb_gc_mark_movable((VALUE)ccs->entries[i].cc);
        }
        return ID_TABLE_CONTINUE;
    }
}

void
rb_cc_table_mark(VALUE klass)
{
    struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
    if (cc_tbl) {
        rb_id_table_foreach_values(cc_tbl, cc_table_mark_i, (void *)klass);
    }
}
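/*
 * Note: RCLASS_CC_TBL(klass) maps a called ID to a rb_class_cc_entries
 * (ccs), which holds the resolved callable method entry (ccs->cme) plus an
 * array of call caches (ccs->entries[i].cc) created for call sites that
 * dispatch to that method on this class. Marking keeps live entries
 * movable; entries whose cme has been invalidated are freed and deleted
 * here.
 */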

static bool
moved_or_living_object_strictly_p(VALUE obj)
{
    return obj && (!rb_objspace_garbage_object_p(obj) || BUILTIN_TYPE(obj) == T_MOVED);
}

static void
mark_and_move_method_entry(rb_method_entry_t *ment, bool reference_updating)
{
    rb_method_definition_t *def = ment->def;

    rb_gc_mark_and_move(&ment->owner);
    rb_gc_mark_and_move(&ment->defined_class);

    if (def) {
        switch (def->type) {
          case VM_METHOD_TYPE_ISEQ:
            if (def->body.iseq.iseqptr) {
                rb_gc_mark_and_move_ptr(&def->body.iseq.iseqptr);
            }
            rb_gc_mark_and_move_ptr(&def->body.iseq.cref);

            if (!reference_updating) {
                if (def->iseq_overload && ment->defined_class) {
                    // it can be a key of "overloaded_cme" table
                    // so it should be pinned.
                    rb_gc_mark((VALUE)ment);
                }
            }
            break;
          case VM_METHOD_TYPE_ATTRSET:
          case VM_METHOD_TYPE_IVAR:
            rb_gc_mark_and_move(&def->body.attr.location);
            break;
          case VM_METHOD_TYPE_BMETHOD:
            rb_gc_mark_and_move(&def->body.bmethod.proc);
            if (!reference_updating) {
                if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
            }
            break;
          case VM_METHOD_TYPE_ALIAS:
            rb_gc_mark_and_move_ptr(&def->body.alias.original_me);
            return;
          case VM_METHOD_TYPE_REFINED:
            rb_gc_mark_and_move_ptr(&def->body.refined.orig_me);
            break;
          case VM_METHOD_TYPE_CFUNC:
          case VM_METHOD_TYPE_ZSUPER:
          case VM_METHOD_TYPE_MISSING:
          case VM_METHOD_TYPE_OPTIMIZED:
          case VM_METHOD_TYPE_UNDEF:
          case VM_METHOD_TYPE_NOTIMPLEMENTED:
            break;
        }
    }
}

void
rb_imemo_mark_and_move(VALUE obj, bool reference_updating)
{
    switch (imemo_type(obj)) {
      case imemo_ast:
        rb_bug("imemo_ast is obsolete");

        break;
      case imemo_callcache: {
        /* cc is a callcache.
         *
         * cc->klass (klass) should not be marked because if the klass is
         * freed, cc->klass is cleared by `vm_cc_invalidate()`.
         *
         * cc->cme (cme) should not be marked because cc is invalidated
         * when the cme is freed.
         *   - klass marks cme if klass uses cme.
         *   - the caller class's ccs->cme marks cc->cme.
         *   - if cc is invalidated (the klass no longer refers to the cc),
         *     cc is invalidated by `vm_cc_invalidate()` and cc->cme is
         *     not accessed.
         *   - with multiple Ractors, cme is collected by global GC, so it
         *     is safe as long as GC does not interleave while accessing
         *     cc and cme.
         *   - however, cc_type_super and cc_type_refinement are not chained
         *     from ccs, so cc->cme should be marked; the cme might be
         *     reachable only through cc in these cases.
         */
        struct rb_callcache *cc = (struct rb_callcache *)obj;
        if (reference_updating) {
            if (!cc->klass) {
                // already invalidated
            }
            else {
                if (moved_or_living_object_strictly_p(cc->klass) &&
                    moved_or_living_object_strictly_p((VALUE)cc->cme_)) {
                    *((VALUE *)&cc->klass) = rb_gc_location(cc->klass);
                    *((struct rb_callable_method_entry_struct **)&cc->cme_) =
                        (struct rb_callable_method_entry_struct *)rb_gc_location((VALUE)cc->cme_);
                }
                else {
                    vm_cc_invalidate(cc);
                }
            }
        }
        else {
            if (vm_cc_super_p(cc) || vm_cc_refinement_p(cc)) {
                rb_gc_mark_movable((VALUE)cc->cme_);
                rb_gc_mark_movable((VALUE)cc->klass);
            }
        }

        break;
      }
      case imemo_callinfo:
        break;
      case imemo_constcache: {
        struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;

        rb_gc_mark_and_move(&ice->value);

        break;
      }
      case imemo_cref: {
        rb_cref_t *cref = (rb_cref_t *)obj;

        rb_gc_mark_and_move(&cref->klass_or_self);
        rb_gc_mark_and_move_ptr(&cref->next);
        rb_gc_mark_and_move(&cref->refinements);

        break;
      }
      case imemo_env: {
        rb_env_t *env = (rb_env_t *)obj;

        if (LIKELY(env->ep)) {
            // env->ep can be NULL just after newobj().
            RUBY_ASSERT(rb_gc_location(env->ep[VM_ENV_DATA_INDEX_ENV]) == rb_gc_location(obj));
            RUBY_ASSERT(reference_updating || VM_ENV_ESCAPED_P(env->ep));

            for (unsigned int i = 0; i < env->env_size; i++) {
                rb_gc_mark_and_move((VALUE *)&env->env[i]);
            }

            rb_gc_mark_and_move_ptr(&env->iseq);

            if (reference_updating) {
                ((VALUE *)env->ep)[VM_ENV_DATA_INDEX_ENV] = rb_gc_location(env->ep[VM_ENV_DATA_INDEX_ENV]);
            }
            else {
                if (!VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_WB_REQUIRED)) {
                    VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
                }
                rb_gc_mark_movable((VALUE)rb_vm_env_prev_env(env));
            }
        }

        break;
      }
      case imemo_ifunc: {
        struct vm_ifunc *ifunc = (struct vm_ifunc *)obj;

        if (!reference_updating) {
            rb_gc_mark_maybe((VALUE)ifunc->data);
        }

        break;
      }
      case imemo_iseq:
        rb_iseq_mark_and_move((rb_iseq_t *)obj, reference_updating);
        break;
      case imemo_memo: {
        struct MEMO *memo = (struct MEMO *)obj;

        rb_gc_mark_and_move((VALUE *)&memo->v1);
        rb_gc_mark_and_move((VALUE *)&memo->v2);
        if (!reference_updating) {
            rb_gc_mark_maybe(memo->u3.value);
        }

        break;
      }
      case imemo_ment:
        mark_and_move_method_entry((rb_method_entry_t *)obj, reference_updating);
        break;
      case imemo_parser_strterm:
        break;
      case imemo_svar: {
        struct vm_svar *svar = (struct vm_svar *)obj;

        rb_gc_mark_and_move((VALUE *)&svar->cref_or_me);
        rb_gc_mark_and_move((VALUE *)&svar->lastline);
        rb_gc_mark_and_move((VALUE *)&svar->backref);
        rb_gc_mark_and_move((VALUE *)&svar->others);

        break;
      }
      case imemo_throw_data: {
        struct vm_throw_data *throw_data = (struct vm_throw_data *)obj;

        rb_gc_mark_and_move((VALUE *)&throw_data->throw_obj);

        break;
      }
      case imemo_tmpbuf: {
        const rb_imemo_tmpbuf_t *m = (const rb_imemo_tmpbuf_t *)obj;

        if (!reference_updating) {
            do {
                rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
            } while ((m = m->next) != NULL);
        }

        break;
      }
      default:
        rb_bug("unreachable");
    }
}
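/*
 * Note: rb_imemo_mark_and_move() serves both GC phases. With
 * reference_updating == false it marks outgoing references; with
 * reference_updating == true it rewrites pointers to objects that
 * compaction may have moved (via rb_gc_location() and the
 * rb_gc_mark_and_move*() helpers).
 */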

/* =========================================================================
 * free
 * ========================================================================= */

static enum rb_id_table_iterator_result
free_const_entry_i(VALUE value, void *data)
{
    rb_const_entry_t *ce = (rb_const_entry_t *)value;
    xfree(ce);
    return ID_TABLE_CONTINUE;
}

void
rb_free_const_table(struct rb_id_table *tbl)
{
    rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
    rb_id_table_free(tbl);
}

// alive: if false, the pointed-to objects may already have been freed.
static void
vm_ccs_free(struct rb_class_cc_entries *ccs, int alive, VALUE klass)
{
    if (ccs->entries) {
        for (int i=0; i<ccs->len; i++) {
            const struct rb_callcache *cc = ccs->entries[i].cc;
            if (!alive) {
                // ccs can be free'ed.
                if (rb_gc_pointer_to_heap_p((VALUE)cc) &&
                    !rb_objspace_garbage_object_p((VALUE)cc) &&
                    IMEMO_TYPE_P(cc, imemo_callcache) &&
                    cc->klass == klass) {
                    // OK. maybe target cc.
                }
                else {
                    continue;
                }
            }

            VM_ASSERT(!vm_cc_super_p(cc) && !vm_cc_refinement_p(cc));
            vm_cc_invalidate(cc);
        }
        ruby_xfree(ccs->entries);
    }
    ruby_xfree(ccs);
}

void
rb_vm_ccs_free(struct rb_class_cc_entries *ccs)
{
    RB_DEBUG_COUNTER_INC(ccs_free);
    vm_ccs_free(ccs, true, Qundef);
}

static enum rb_id_table_iterator_result
cc_table_free_i(VALUE ccs_ptr, void *data)
{
    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
    VALUE klass = (VALUE)data;
    VM_ASSERT(vm_ccs_p(ccs));

    vm_ccs_free(ccs, false, klass);

    return ID_TABLE_CONTINUE;
}

void
rb_cc_table_free(VALUE klass)
{
    struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);

    if (cc_tbl) {
        rb_id_table_foreach_values(cc_tbl, cc_table_free_i, (void *)klass);
        rb_id_table_free(cc_tbl);
    }
}
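/*
 * Note: vm_ccs_free() is reached on two paths. rb_vm_ccs_free()
 * (alive = true) is called from cc_table_mark_i() while the class is still
 * live, once its cme has been invalidated. rb_cc_table_free()
 * (alive = false) runs when the class itself is being freed, so each cc is
 * re-checked before being touched because it may already be garbage.
 */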

void
rb_imemo_free(VALUE obj)
{
    switch (imemo_type(obj)) {
      case imemo_ast:
        rb_bug("imemo_ast is obsolete");

        break;
      case imemo_callcache:
        RB_DEBUG_COUNTER_INC(obj_imemo_callcache);

        break;
      case imemo_callinfo: {
        const struct rb_callinfo *ci = ((const struct rb_callinfo *)obj);

        if (ci->kwarg) {
            ((struct rb_callinfo_kwarg *)ci->kwarg)->references--;
            if (ci->kwarg->references == 0) xfree((void *)ci->kwarg);
        }
        RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);

        break;
      }
      case imemo_constcache:
        RB_DEBUG_COUNTER_INC(obj_imemo_constcache);

        break;
      case imemo_cref:
        RB_DEBUG_COUNTER_INC(obj_imemo_cref);

        break;
      case imemo_env: {
        rb_env_t *env = (rb_env_t *)obj;

        RUBY_ASSERT(VM_ENV_ESCAPED_P(env->ep));
        xfree((VALUE *)env->env);
        RB_DEBUG_COUNTER_INC(obj_imemo_env);

        break;
      }
      case imemo_ifunc:
        RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
        break;
      case imemo_iseq:
        rb_iseq_free((rb_iseq_t *)obj);
        RB_DEBUG_COUNTER_INC(obj_imemo_iseq);

        break;
      case imemo_memo:
        RB_DEBUG_COUNTER_INC(obj_imemo_memo);

        break;
      case imemo_ment:
        rb_free_method_entry((rb_method_entry_t *)obj);
        RB_DEBUG_COUNTER_INC(obj_imemo_ment);

        break;
      case imemo_parser_strterm:
        RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);

        break;
      case imemo_svar:
        RB_DEBUG_COUNTER_INC(obj_imemo_svar);
        break;
      case imemo_throw_data:
        RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);

        break;
      case imemo_tmpbuf:
        xfree(((rb_imemo_tmpbuf_t *)obj)->ptr);
        RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);

        break;
      default:
        rb_bug("unreachable");
    }
}