From 467942436a1f7bb0abf1453cc89d01ad2fdf0d84 Mon Sep 17 00:00:00 2001
From: nozomemein
Date: Thu, 16 Apr 2026 07:09:06 +0900
Subject: [PATCH 1/5] ZJIT: Stabilize polymorphic getivar tests

Avoid receiver shapes near the embedded/heap boundary in the polymorphic
getivar tests. Make both receivers clearly heap-backed so stats and dev
builds still exercise distinct @foo slots without depending on
embedded_p().
---
 zjit/src/hir/opt_tests.rs | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/zjit/src/hir/opt_tests.rs b/zjit/src/hir/opt_tests.rs
index 143201134cba3c..42c23ca48d02fe 100644
--- a/zjit/src/hir/opt_tests.rs
+++ b/zjit/src/hir/opt_tests.rs
@@ -7912,12 +7912,13 @@ mod hir_opt_tests {
         set_call_threshold(3);
         eval(r#"
             class C
-              def foo_then_bar
+              def foo_then_many
                 @foo = 1
+                1000.times { |i| instance_variable_set(:"@v#{i}", i) }
                 @bar = 2
               end
 
-              def bar_then_foo
+              def many_then_foo
                 1000.times { |i| instance_variable_set(:"@v#{i}", i) }
                 @bar = 3
                 @foo = 4
@@ -7927,14 +7928,14 @@
               end
 
               O1 = C.new
-              O1.foo_then_bar
+              O1.foo_then_many
               O2 = C.new
-              O2.bar_then_foo
+              O2.many_then_foo
               O1.foo
               O2.foo
         "#);
         assert_snapshot!(hir_string_proc("C.instance_method(:foo)"), @"
-        fn foo@:14:
+        fn foo@:15:
         bb1():
           EntryPoint interpreter
           v1:BasicObject = LoadSelf
@@ -7986,12 +7987,13 @@
         set_call_threshold(6);
         eval(r#"
             class C
-              def foo_then_bar
+              def foo_then_many
                 @foo = 1
+                1000.times { |i| instance_variable_set(:"@v#{i}", i) }
                 @bar = 2
               end
 
-              def bar_then_foo
+              def many_then_foo
                 1000.times { |i| instance_variable_set(:"@v#{i}", i) }
                 @bar = 3
                 @foo = 4
@@ -8001,9 +8003,9 @@
               end
 
               O1 = C.new
-              O1.foo_then_bar
+              O1.foo_then_many
               O2 = C.new
-              O2.bar_then_foo
+              O2.many_then_foo
               O1.foo
               O1.foo
               O1.foo
@@ -8011,7 +8013,7 @@
               O2.foo
         "#);
         assert_snapshot!(hir_string_proc("C.instance_method(:foo)"), @"
-        fn foo@:14:
+        fn foo@:15:
         bb1():
           EntryPoint interpreter
           v1:BasicObject = LoadSelf

From 18b0e3f80c3ff28fed6936da39c8466afb66200d Mon Sep 17 00:00:00 2001
From: nozomemein
Date: Thu, 16 Apr 2026 07:48:24 +0900
Subject: [PATCH 2/5] ZJIT: Stabilize setivar shape/capacity transition tests

The old setivar opt tests depended on embedded-capacity boundaries that
vary between dev and stats builds. Compute the boundary from
GC::INTERNAL_CONSTANTS and construct receivers so the tests reliably
cover shape transition and heap-backed self upgrade paths.
---
 zjit/src/hir/opt_tests.rs | 109 +++++++++++++++++++++++---------------
 1 file changed, 67 insertions(+), 42 deletions(-)

diff --git a/zjit/src/hir/opt_tests.rs b/zjit/src/hir/opt_tests.rs
index 42c23ca48d02fe..a735665bb24588 100644
--- a/zjit/src/hir/opt_tests.rs
+++ b/zjit/src/hir/opt_tests.rs
@@ -5645,15 +5645,38 @@ mod hir_opt_tests {
 
     #[test]
     fn test_specialize_multiple_monomorphic_setivar_with_shape_transition() {
-        eval("
-            def test
-              @foo = 1
-              @bar = 2
+        // Compute the embedded-capacity boundary for this build so @foo stays
+        // in-capacity while @bar reliably crosses it in both dev and stats modes.
+        // See gc/default/default.c for GC::INTERNAL_CONSTANTS and
+        // shape.c::Init_default_shapes for the embedded-capacity calculation
+        // that backs shape capacities.
+        eval(r#"
+            word_size = [0].pack("J").bytesize
+            embed_cap = (GC::INTERNAL_CONSTANTS[:RVALUE_SIZE] - GC::INTERNAL_CONSTANTS[:RBASIC_SIZE]) / word_size
+
+            klass = Class.new do
+              define_method(:initialize) do
+                # Leave one embedded slot free so @foo stays in-capacity.
+                (embed_cap - 1).times { |i| instance_variable_set(:"@v#{i}", i) }
+              end
+
+              def test
+                @foo = 1
+                @bar = 2
+              end
             end
-            test
-        ");
-        assert_snapshot!(hir_string("test"), @"
-        fn test@:3:
+
+            # Grow class max_iv_count so fresh instances start with embed_cap slots.
+            # See gc.c::rb_class_allocate_instance
+            warm = klass.new
+            warm.instance_variable_set(:"@warm#{embed_cap}", embed_cap)
+
+            obj = klass.new
+            obj.test
+            TEST = klass.instance_method(:test)
+        "#);
+        assert_snapshot!(hir_string_proc("TEST"), @"
+        fn test@:12:
         bb1():
           EntryPoint interpreter
           v1:BasicObject = LoadSelf
@@ -15113,17 +15136,33 @@
 
     #[test]
     fn upgrade_self_type_to_heap_after_setivar() {
-        eval("
-            def test
-              @a = 1
-              @b = 2
-              @c = 3
-              @d = 4
-            end
-            test
-        ");
-        assert_snapshot!(hir_string("test"), @"
-        fn test@:3:
+        // Fill the receiver to the embedded-capacity boundary in initialize so
+        // the first write overflows into the heap and the next write can use
+        // the upgraded heap-backed self type.
+        // See gc/default/default.c for GC::INTERNAL_CONSTANTS and
+        // shape.c::Init_default_shapes for the embedded-capacity calculation
+        // that backs shape capacities.
+        eval(r#"
+            word_size = [0].pack("J").bytesize
+            embed_cap = (GC::INTERNAL_CONSTANTS[:RVALUE_SIZE] - GC::INTERNAL_CONSTANTS[:RBASIC_SIZE]) / word_size
+
+            klass = Class.new do
+              define_method(:initialize) do
+                embed_cap.times { |i| instance_variable_set(:"@v#{i}", i) }
+              end
+
+              def test
+                @overflow = 1
+                @after = 2
+              end
+            end
+
+            obj = klass.new
+            obj.test
+            TEST = klass.instance_method(:test)
+        "#);
+        assert_snapshot!(hir_string_proc("TEST"), @"
+        fn test@:11:
         bb1():
           EntryPoint interpreter
           v1:BasicObject = LoadSelf
@@ -15135,33 +15174,19 @@
         bb3(v6:BasicObject):
           v10:Fixnum[1] = Const Value(1)
           PatchPoint SingleRactorMode
-          v42:HeapBasicObject = GuardType v6, HeapBasicObject
-          v43:CShape = LoadField v42, :_shape_id@0x1000
-          v44:CShape[0x1001] = GuardBitEquals v43, CShape(0x1001)
-          StoreField v42, :@a@0x1002, v10
-          WriteBarrier v42, v10
-          v47:CShape[0x1003] = Const CShape(0x1003)
-          StoreField v42, :_shape_id@0x1000, v47
+          SetIvar v6, :@overflow, v10
           v14:HeapBasicObject = RefineType v6, HeapBasicObject
           v17:Fixnum[2] = Const Value(2)
           PatchPoint SingleRactorMode
-          SetIvar v14, :@b, v17
-          v21:HeapBasicObject = RefineType v14, HeapBasicObject
-          v24:Fixnum[3] = Const Value(3)
-          PatchPoint SingleRactorMode
-          SetIvar v21, :@c, v24
-          v28:HeapBasicObject = RefineType v21, HeapBasicObject
-          v31:Fixnum[4] = Const Value(4)
-          PatchPoint SingleRactorMode
-          v50:CShape = LoadField v28, :_shape_id@0x1000
-          v51:CShape[0x1004] = GuardBitEquals v50, CShape(0x1004)
-          v52:CPtr = LoadField v28, :_as_heap@0x1002
-          StoreField v52, :@d@0x1005, v31
-          WriteBarrier v28, v31
-          v55:CShape[0x1006] = Const CShape(0x1006)
-          StoreField v28, :_shape_id@0x1000, v55
+          v29:CShape = LoadField v14, :_shape_id@0x1000
+          v30:CShape[0x1001] = GuardBitEquals v29, CShape(0x1001)
+          v31:CPtr = LoadField v14, :_as_heap@0x1002
+          StoreField v31, :@after@0x1003, v17
+          WriteBarrier v14, v17
+          v34:CShape[0x1004] = Const CShape(0x1004)
+          StoreField v14, :_shape_id@0x1000, v34
           CheckInterrupts
-          Return v31
+          Return v17
         ");

From cb5585b79ccc5a8df30412ece3b4595a98aaa41d Mon Sep 17 00:00:00 2001
From: nozomemein
Date: Fri, 1 May 2026 07:44:16 +0900
Subject: [PATCH 3/5] ZJIT: Simplify setivar shape-transition test

The old test snapshot depended on the second write crossing the
embedded-capacity boundary, even though the behavior under test was just
the shape transition across two ivar
writes. Warm the class to two ivars and assert the natural specialized
path instead, so the test covers the intended shape updates without
relying on GC-derived capacity behavior.
---
 zjit/src/hir/opt_tests.rs | 27 +++++++++------------------
 1 file changed, 9 insertions(+), 18 deletions(-)

diff --git a/zjit/src/hir/opt_tests.rs b/zjit/src/hir/opt_tests.rs
index a735665bb24588..7ef8a8048bed3c 100644
--- a/zjit/src/hir/opt_tests.rs
+++ b/zjit/src/hir/opt_tests.rs
@@ -5645,38 +5645,26 @@ mod hir_opt_tests {
 
     #[test]
     fn test_specialize_multiple_monomorphic_setivar_with_shape_transition() {
-        // Compute the embedded-capacity boundary for this build so @foo stays
-        // in-capacity while @bar reliably crosses it in both dev and stats modes.
-        // See gc/default/default.c for GC::INTERNAL_CONSTANTS and
-        // shape.c::Init_default_shapes for the embedded-capacity calculation
-        // that backs shape capacities.
         eval(r#"
-            word_size = [0].pack("J").bytesize
-            embed_cap = (GC::INTERNAL_CONSTANTS[:RVALUE_SIZE] - GC::INTERNAL_CONSTANTS[:RBASIC_SIZE]) / word_size
-
             klass = Class.new do
-              define_method(:initialize) do
-                # Leave one embedded slot free so @foo stays in-capacity.
-                (embed_cap - 1).times { |i| instance_variable_set(:"@v#{i}", i) }
-              end
-
               def test
                 @foo = 1
                 @bar = 2
               end
             end
 
-            # Grow class max_iv_count so fresh instances start with embed_cap slots.
-            # See gc.c::rb_class_allocate_instance
+            # Grow class max_iv_count so fresh instances can keep both writes
+            # on the embedded fast path.
             warm = klass.new
-            warm.instance_variable_set(:"@warm#{embed_cap}", embed_cap)
+            warm.instance_variable_set(:@warm1, 1)
+            warm.instance_variable_set(:@warm2, 2)
 
             obj = klass.new
             obj.test
             TEST = klass.instance_method(:test)
         "#);
         assert_snapshot!(hir_string_proc("TEST"), @"
-        fn test@:12:
+        fn test@:4:
         bb1():
           EntryPoint interpreter
           v1:BasicObject = LoadSelf
@@ -5698,7 +5686,10 @@
           v14:HeapBasicObject = RefineType v6, HeapBasicObject
           v17:Fixnum[2] = Const Value(2)
           PatchPoint SingleRactorMode
-          SetIvar v14, :@bar, v17
+          StoreField v14, :@bar@0x1004, v17
+          WriteBarrier v14, v17
+          v40:CShape[0x1005] = Const CShape(0x1005)
+          StoreField v14, :_shape_id@0x1000, v40
           CheckInterrupts
           Return v17
         ");

From 138b4ba2fb455a9e9617259b59d11566a08b711a Mon Sep 17 00:00:00 2001
From: nozomemein
Date: Fri, 1 May 2026 07:59:08 +0900
Subject: [PATCH 4/5] ZJIT: Skip heap-upgrade snapshot on unsupported builds

Stop deriving the embedded-capacity boundary from GC internals in
`upgrade_self_type_to_heap_after_setivar`. Instead, use self-consistency
checks to keep the snapshot only on builds where five ivars stay
embedded and the next write overflows into the heap-backed self upgrade
path.
---
 zjit/src/hir/opt_tests.rs | 45 +++++++++++++++++++++++++--------------
 1 file changed, 29 insertions(+), 16 deletions(-)

diff --git a/zjit/src/hir/opt_tests.rs b/zjit/src/hir/opt_tests.rs
index 7ef8a8048bed3c..8ab9441a147b9d 100644
--- a/zjit/src/hir/opt_tests.rs
+++ b/zjit/src/hir/opt_tests.rs
@@ -15127,19 +15127,16 @@ mod hir_opt_tests {
 
     #[test]
     fn upgrade_self_type_to_heap_after_setivar() {
-        // Fill the receiver to the embedded-capacity boundary in initialize so
-        // the first write overflows into the heap and the next write can use
-        // the upgraded heap-backed self type.
-        // See gc/default/default.c for GC::INTERNAL_CONSTANTS and
-        // shape.c::Init_default_shapes for the embedded-capacity calculation
-        // that backs shape capacities.
- eval(r#" - word_size = [0].pack("J").bytesize - embed_cap = (GC::INTERNAL_CONSTANTS[:RVALUE_SIZE] - GC::INTERNAL_CONSTANTS[:RBASIC_SIZE]) / word_size - + // Snapshot the overflow path only when this build naturally keeps five + // ivars embedded and overflows on the next write. + let obj = eval(r#" klass = Class.new do - define_method(:initialize) do - embed_cap.times { |i| instance_variable_set(:"@v#{i}", i) } + def initialize + @v0 = 0 + @v1 = 1 + @v2 = 2 + @v3 = 3 + @v4 = 4 end def test @@ -15148,12 +15145,28 @@ mod hir_opt_tests { end end - obj = klass.new - obj.test TEST = klass.instance_method(:test) - "#); + OBJ = klass.new + OBJ + "#); + // Skip builds where five ivars already force heap-backed storage. + if !obj.embedded_p() { + return; + } + + // Make sure the next write is the one that overflows into heap-backed + // storage, so this snapshot still exercises the self-type upgrade path. + let probe = eval(r#" + probe = OBJ.class.new + probe.instance_variable_set(:@overflow, 1) + probe + "#); + if probe.embedded_p() { + return; + } + eval("OBJ.test"); assert_snapshot!(hir_string_proc("TEST"), @" - fn test@:11: + fn test@:12: bb1(): EntryPoint interpreter v1:BasicObject = LoadSelf From 06fc5c24820e1cba55183f8d3a33959192d4bf36 Mon Sep 17 00:00:00 2001 From: Peter Zhu Date: Sat, 2 May 2026 21:21:21 -0400 Subject: [PATCH 5/5] Use EC saved in GC for root marking Since EC is thread-local, we previously used rb_gc_worker_thread_set_vm_context in MMTk worker threads to temporarily set the EC. However, this was inelegant and also occasionally caused crashes when marking threads/fibers for the current EC since it will mark the current machine stack twice (once during root marking and once for the fiber). However, since the machine stack is actively being used, the contents may be different when marking the fiber. Since all objects on the machine stack are pinned, this may cause an unpinned object to be pinned, which is not allowed in Immix. 
The following crash can be observed:

    Object 0x200fffbc7d8 is trying to pin 0x200ffc80188
       0: mmtk_ruby::handle_gc_thread_panic
       1: mmtk_ruby::set_panic_hook::{{closure}}
       2: <alloc::boxed::Box<dyn for<'a, 'b> core::ops::function::Fn<(&'a std::panic::PanicHookInfo<'b>,), Output = ()> + core::marker::Sync + core::marker::Send> as core::ops::function::Fn<(&std::panic::PanicHookInfo,)>>::call
             at /rustc/59807616e1fa2540724bfbac14d7976d7e4a3860/library/alloc/src/boxed.rs:2254:9
       3: std::panicking::panic_with_hook
             at /rustc/59807616e1fa2540724bfbac14d7976d7e4a3860/library/std/src/panicking.rs:833:13
       4: std::panicking::panic_handler::{closure#0}
             at /rustc/59807616e1fa2540724bfbac14d7976d7e4a3860/library/std/src/panicking.rs:698:13
       5: std::sys::backtrace::__rust_end_short_backtrace::
             at /rustc/59807616e1fa2540724bfbac14d7976d7e4a3860/library/std/src/sys/backtrace.rs:182:18
       6: __rustc::rust_begin_unwind
             at /rustc/59807616e1fa2540724bfbac14d7976d7e4a3860/library/std/src/panicking.rs:689:5
       7: core::panicking::panic_fmt
             at /rustc/59807616e1fa2540724bfbac14d7976d7e4a3860/library/core/src/panicking.rs:80:14
       8: >::scan_object_and_trace_edges::{{closure}}
       9: mmtk_ruby::abi::ObjectClosure::c_function_registered
      10: rb_mmtk_call_object_closure
             at gc/mmtk/mmtk.c:976:19
      11: rb_gc_impl_mark_and_pin
             at gc/mmtk/mmtk.c:1008:5
      12: rb_gc_impl_mark_and_pin
             at gc/mmtk/mmtk.c:1004:1
      13: gc_mark_maybe_internal
             at gc.c:2908:5
      14: gc_mark_maybe_internal
             at gc.c:2906:1
      15: gc_mark_maybe_each_location
             at gc.c:2939:5
      16: gc_mark_maybe_each_location
             at gc.c:2937:1
      17: each_location
             at gc.c:2924:9
      18: each_location_ptr
             at gc.c:2933:5
      19: each_location_ptr
             at gc.c:2930:1
      20: rb_gc_mark_machine_context
             at gc.c:3200:5
      21: rb_execution_context_mark
             at vm.c:3768:9
      22: cont_mark
             at cont.c:1155:5
      23: fiber_mark
             at cont.c:1284:5
      24: rb_mmtk_call_gc_mark_children
             at gc/mmtk/mmtk.c:318:5
      25: >::scan_object_and_trace_edges::{{closure}}
---
 gc.c                 | 20 ++++++++++++++++++--
 gc/default/default.c | 12 ++++++++++++
 gc/gc.h              |  2 --
 gc/gc_impl.h         |  1 +
 gc/mmtk/mmtk.c       | 12 ++++++++----
 internal/gc.h        |  1 +
 vm.c                 |  4 ++--
 7 files changed, 42 insertions(+), 10 deletions(-)

diff --git a/gc.c b/gc.c
index e8a894e2231202..706c395e8b2d6b 100644
--- a/gc.c
+++ b/gc.c
@@ -191,7 +191,6 @@ rb_gc_get_ractor_newobj_cache(void)
     return GET_RACTOR()->newobj_cache;
 }
 
-#if USE_MODULAR_GC
 void
 rb_gc_initialize_vm_context(struct rb_gc_vm_context *context)
 {
@@ -199,6 +198,7 @@ rb_gc_initialize_vm_context(struct rb_gc_vm_context *context)
     context->ec = GET_EC();
 }
 
+#if USE_MODULAR_GC
 void
 rb_gc_worker_thread_set_vm_context(struct rb_gc_vm_context *context)
 {
@@ -626,6 +626,7 @@ typedef struct gc_function_map {
     void (*config_set)(void *objspace_ptr, VALUE hash);
     void (*stress_set)(void *objspace_ptr, VALUE flag);
     VALUE (*stress_get)(void *objspace_ptr);
+    struct rb_gc_vm_context *(*get_vm_context)(void *objspace_ptr);
     // Object allocation
     VALUE (*new_obj)(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size);
     size_t (*obj_slot_size)(VALUE obj);
@@ -804,6 +805,7 @@ ruby_modular_gc_init(void)
    load_modular_gc_func(config_get);
    load_modular_gc_func(stress_set);
    load_modular_gc_func(stress_get);
+    load_modular_gc_func(get_vm_context);
    // Object allocation
    load_modular_gc_func(new_obj);
    load_modular_gc_func(obj_slot_size);
@@ -891,6 +893,7 @@ ruby_modular_gc_init(void)
 # define rb_gc_impl_config_set rb_gc_functions.config_set
 # define rb_gc_impl_stress_set rb_gc_functions.stress_set
 # define rb_gc_impl_stress_get rb_gc_functions.stress_get
+# define rb_gc_impl_get_vm_context rb_gc_functions.get_vm_context
 // Object allocation
 # define rb_gc_impl_new_obj rb_gc_functions.new_obj
 # define rb_gc_impl_obj_slot_size rb_gc_functions.obj_slot_size
@@ -3238,10 +3241,23 @@ gc_declarative_marking_p(const rb_data_type_t *type)
     return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
 }
 
+rb_execution_context_t *
+rb_gc_get_ec(void)
+{
+    void *objspace = rb_gc_get_objspace();
+
+    if (RB_LIKELY(rb_gc_impl_during_gc_p(objspace))) {
+        return rb_gc_impl_get_vm_context(objspace)->ec;
+    }
+    else {
+        return GET_EC();
+    }
+}
+
 void
 rb_gc_mark_roots(void *objspace, const char **categoryp)
 {
-    rb_execution_context_t *ec = GET_EC();
+    rb_execution_context_t *ec = rb_gc_get_ec();
     rb_vm_t *vm = rb_ec_vm_ptr(ec);
 
 #define MARK_CHECKPOINT(category) do { \
diff --git a/gc/default/default.c b/gc/default/default.c
index 309f47ac4f4de9..d8dfdfa13dca0f 100644
--- a/gc/default/default.c
+++ b/gc/default/default.c
@@ -683,6 +683,8 @@ typedef struct rb_objspace {
 
     int sweeping_heap_count;
     int fork_vm_lock_lev;
+
+    struct rb_gc_vm_context vm_context;
 } rb_objspace_t;
 
 #ifndef HEAP_PAGE_ALIGN_LOG
@@ -1652,6 +1654,14 @@ rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE ptr)
         !RVALUE_MARKED(objspace, ptr);
 }
 
+struct rb_gc_vm_context *
+rb_gc_impl_get_vm_context(void *objspace_ptr)
+{
+    rb_objspace_t *objspace = objspace_ptr;
+
+    return &objspace->vm_context;
+}
+
 static void free_stack_chunks(mark_stack_t *);
 static void mark_stack_free_cache(mark_stack_t *);
 static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
@@ -6767,6 +6777,8 @@ gc_marking_enter(rb_objspace_t *objspace)
     if (MEASURE_GC) {
         gc_clock_start(&objspace->profile.marking_start_time);
     }
+
+    rb_gc_initialize_vm_context(&objspace->vm_context);
 }
 
 static void
diff --git a/gc/gc.h b/gc/gc.h
index 469a4902f03365..d38a129887d27f 100644
--- a/gc/gc.h
+++ b/gc/gc.h
@@ -11,7 +11,6 @@
  */
 
 #include "ruby/ruby.h"
-#if USE_MODULAR_GC
 #include "ruby/thread_native.h"
 
 struct rb_gc_vm_context {
@@ -19,7 +18,6 @@ struct rb_gc_vm_context {
 
     struct rb_execution_context_struct *ec;
 };
-#endif
 
 typedef int (*vm_table_foreach_callback_func)(VALUE value, void *data);
 typedef int (*vm_table_update_callback_func)(VALUE *value, void *data);
diff --git a/gc/gc_impl.h b/gc/gc_impl.h
index 7898316a75e536..d9e44cc66d89e6 100644
--- a/gc/gc_impl.h
+++ b/gc/gc_impl.h
@@ -54,6 +54,7 @@ GC_IMPL_FN void rb_gc_impl_stress_set(void *objspace_ptr, VALUE flag);
 GC_IMPL_FN VALUE rb_gc_impl_stress_get(void *objspace_ptr);
 GC_IMPL_FN VALUE rb_gc_impl_config_get(void *objspace_ptr);
 GC_IMPL_FN void rb_gc_impl_config_set(void *objspace_ptr, VALUE hash);
+GC_IMPL_FN struct rb_gc_vm_context *rb_gc_impl_get_vm_context(void *objspace_ptr);
 // Object allocation
 GC_IMPL_FN VALUE rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size);
 GC_IMPL_FN size_t rb_gc_impl_obj_slot_size(VALUE obj);
diff --git a/gc/mmtk/mmtk.c b/gc/mmtk/mmtk.c
index 3f680e76f4cd9f..e4cd71925c7ae6 100644
--- a/gc/mmtk/mmtk.c
+++ b/gc/mmtk/mmtk.c
@@ -253,11 +253,7 @@ rb_mmtk_scan_gc_roots(void)
 {
     struct objspace *objspace = rb_gc_get_objspace();
 
-    // FIXME: Make `rb_gc_mark_roots` aware that the current thread may not have EC.
-    // See: https://github.com/ruby/mmtk/issues/22
-    rb_gc_worker_thread_set_vm_context(&objspace->vm_context);
     rb_gc_mark_roots(objspace, NULL);
-    rb_gc_worker_thread_unset_vm_context(&objspace->vm_context);
 }
 
 static int
@@ -784,6 +780,14 @@ rb_gc_impl_config_set(void *objspace_ptr, VALUE hash)
     // TODO
 }
 
+struct rb_gc_vm_context *
+rb_gc_impl_get_vm_context(void *objspace_ptr)
+{
+    struct objspace *objspace = objspace_ptr;
+
+    return &objspace->vm_context;
+}
+
 // Object allocation
 
 static VALUE
diff --git a/internal/gc.h b/internal/gc.h
index a255996734fdcc..77651c10baa08d 100644
--- a/internal/gc.h
+++ b/internal/gc.h
@@ -199,6 +199,7 @@ RUBY_ATTR_MALLOC void *rb_xmalloc_mul_add_mul(size_t, size_t, size_t, size_t);
 RUBY_ATTR_MALLOC void *rb_xcalloc_mul_add_mul(size_t, size_t, size_t, size_t);
 void rb_gc_obj_id_moved(VALUE obj);
 void rb_gc_register_pinning_obj(VALUE obj);
+rb_execution_context_t *rb_gc_get_ec(void);
 
 void *rb_gc_ractor_cache_alloc(rb_ractor_t *ractor);
 void rb_gc_ractor_cache_free(void *cache);
diff --git a/vm.c b/vm.c
index 71d5a84034ecd1..42c2267ea65aa0 100644
--- a/vm.c
+++ b/vm.c
@@ -3763,8 +3763,8 @@ rb_execution_context_mark(const rb_execution_context_t *ec)
 
     /* mark machine stack */
     if (ec->machine.stack_start && ec->machine.stack_end &&
-        ec != GET_EC() /* marked for current ec at the first stage of marking */
-       ) {
+        /* marked for current ec at the first stage of marking */
+        ec != rb_gc_get_ec()) {
         rb_gc_mark_machine_context(ec);
     }
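
As background for the ZJIT patches above: the embedded-capacity boundary they compute can also be probed from plain Ruby. The sketch below is illustrative only, not part of the series; it assumes a CRuby build that exposes GC::INTERNAL_CONSTANTS[:RVALUE_SIZE] and [:RBASIC_SIZE] (see gc/default/default.c) plus the objspace extension, and the probe object and variable names are made up for the example. It derives the embedded ivar capacity the same way patch 2 does, then watches ObjectSpace.memsize_of grow once writes spill past the boundary into heap-backed storage.

    # Illustrative sketch (not part of the series). Assumes CRuby with
    # GC::INTERNAL_CONSTANTS and the objspace extension available.
    require "objspace"

    # Embedded ivar capacity: the space left in a base slot after the
    # RBasic header, measured in VALUE-sized words.
    word_size = [0].pack("J").bytesize
    embed_cap = (GC::INTERNAL_CONSTANTS[:RVALUE_SIZE] -
                 GC::INTERNAL_CONSTANTS[:RBASIC_SIZE]) / word_size

    obj = Object.new
    sizes = (0..embed_cap).map do |i|
      obj.instance_variable_set(:"@v#{i}", i)
      ObjectSpace.memsize_of(obj)
    end

    # While the ivars stay embedded, memsize_of reports only the slot size;
    # after they spill to a malloc'd table, that buffer is counted on top.
    puts "embedded capacity: #{embed_cap}"
    p sizes

On builds with variable-width allocation, the observed boundary can shift with the slot size an object was allocated into, which is the same dev/stats variance these patches stop depending on.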