@@ -45,9 +45,16 @@ unsafe impl Traverse for InstanceDict {
 unsafe impl Traverse for PyInner<Erased> {
     /// Because PyObject hold a `PyInner<Erased>`, so we need to trace it
     fn traverse(&self, tracer_fn: &mut TraverseFn<'_>) {
-        // 1. trace `dict` and `slots` field(`typ` can't trace for it's a AtomicRef while is leaked by design)
-        // 2. call vtable's trace function to trace payload
-        // self.typ.trace(tracer_fn);
+        // For heap type instances, traverse the type reference.
+        // PyAtomicRef holds a strong reference (via PyRef::leak), so GC must
+        // account for it to correctly detect instance ↔ type cycles.
+        // Static types are always alive and don't need this.
+        let typ = &*self.typ;
+        if typ.heaptype_ext.is_some() {
+            // Safety: Py<PyType> and PyObject share the same memory layout
+            let typ_obj: &PyObject = unsafe { &*(typ as *const _ as *const PyObject) };
+            tracer_fn(typ_obj);
+        }
         self.dict.traverse(tracer_fn);
         // weak_list is inline atomic pointers, no heap allocation, no trace
         self.slots.traverse(tracer_fn);
@@ -64,10 +71,12 @@ unsafe impl Traverse for PyInner<Erased> {
 unsafe impl<T: MaybeTraverse> Traverse for PyInner<T> {
     /// Type is known, so we can call `try_trace` directly instead of using erased type vtable
     fn traverse(&self, tracer_fn: &mut TraverseFn<'_>) {
-        // 1. trace `dict` and `slots` field(`typ` can't trace for it's a AtomicRef while is leaked by design)
-        // 2. call corresponding `try_trace` function to trace payload
-        //    (No need to call vtable's trace function because we already know the type)
-        // self.typ.trace(tracer_fn);
+        // For heap type instances, traverse the type reference (same as erased version)
+        let typ = &*self.typ;
+        if typ.heaptype_ext.is_some() {
+            let typ_obj: &PyObject = unsafe { &*(typ as *const _ as *const PyObject) };
+            tracer_fn(typ_obj);
+        }
         self.dict.traverse(tracer_fn);
         // weak_list is inline atomic pointers, no heap allocation, no trace
         self.slots.traverse(tracer_fn);
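
The `// Safety` comment in the diff relies on `Py<PyType>` and `PyObject` sharing a compatible memory layout, so a reference to the concrete type object can be reinterpreted as a `&PyObject` for the tracer. The sketch below is a minimal, self-contained illustration of that general pattern using hypothetical `Header`/`Typed`/`Erased` types with assumed `repr(C)`/`repr(transparent)` layouts; it is not RustPython's actual object representation.

```rust
// Hypothetical, simplified stand-ins (not RustPython's real definitions),
// showing why a typed-to-erased reference cast can be sound when both types
// share a common prefix layout.

#[repr(C)]
struct Header {
    ref_count: usize,
}

// "Typed" object: header followed by a concrete payload.
#[repr(C)]
struct Typed<T> {
    header: Header,
    payload: T,
}

// "Erased" view: only the header is accessible through this type.
#[repr(transparent)]
struct Erased {
    header: Header,
}

impl<T> Typed<T> {
    // Sound because `Typed<T>` is repr(C) and starts with `Header`, and
    // `Erased` is a transparent wrapper around `Header`, so the prefixes
    // have identical layout and the erased view never touches the payload.
    fn as_erased(&self) -> &Erased {
        unsafe { &*(self as *const Typed<T> as *const Erased) }
    }
}

fn trace(obj: &Erased) {
    println!("tracing object with ref_count = {}", obj.header.ref_count);
}

fn main() {
    let typed = Typed { header: Header { ref_count: 1 }, payload: 42u32 };
    // Same shape of cast as `typ as *const _ as *const PyObject` in the diff,
    // but on the hypothetical types above.
    trace(typed.as_erased());
}
```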