diff --git a/Include/internal/pycore_gc.h b/Include/internal/pycore_gc.h
index 9e465fdd86279f3..c570a919e2e8592 100644
--- a/Include/internal/pycore_gc.h
+++ b/Include/internal/pycore_gc.h
@@ -312,6 +312,10 @@ struct _gc_runtime_state {
        collections, and are awaiting to undergo a full collection for
        the first time. */
     Py_ssize_t long_lived_pending;
+
+    /* True to use immortalization in places where we would normally use
+       deferred reference counting. */
+    int immortalize_deferred;
 #endif
 };
 
@@ -343,6 +347,11 @@ extern void _PyGC_ClearAllFreeLists(PyInterpreterState *interp);
 extern void _Py_ScheduleGC(PyThreadState *tstate);
 extern void _Py_RunGC(PyThreadState *tstate);
 
+#ifdef Py_GIL_DISABLED
+// gh-117783: Immortalize objects that use deferred reference counting
+extern void _PyGC_ImmortalizeDeferredObjects(PyInterpreterState *interp);
+#endif
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/Objects/object.c b/Objects/object.c
index fbb4fdac7d405be..0d4c13a075aed6d 100644
--- a/Objects/object.c
+++ b/Objects/object.c
@@ -2427,6 +2427,12 @@ _PyObject_SetDeferredRefcount(PyObject *op)
     assert(PyType_IS_GC(Py_TYPE(op)));
     assert(_Py_IsOwnedByCurrentThread(op));
     assert(op->ob_ref_shared == 0);
+    PyInterpreterState *interp = _PyInterpreterState_GET();
+    if (interp->gc.immortalize_deferred) {
+        // gh-117696: immortalize instead of using deferred reference counting
+        _Py_SetImmortal(op);
+        return;
+    }
     op->ob_gc_bits |= _PyGC_BITS_DEFERRED;
     op->ob_ref_local += 1;
     op->ob_ref_shared = _Py_REF_QUEUED;
diff --git a/Python/gc_free_threading.c b/Python/gc_free_threading.c
index 9cf0e989d0993f5..8e99947732d68aa 100644
--- a/Python/gc_free_threading.c
+++ b/Python/gc_free_threading.c
@@ -1781,6 +1781,30 @@ custom_visitor_wrapper(const mi_heap_t *heap, const mi_heap_area_t *area,
     return true;
 }
 
+// gh-117783: Immortalize objects that use deferred reference counting to
+// temporarily work around scaling bottlenecks.
+static bool
+immortalize_visitor(const mi_heap_t *heap, const mi_heap_area_t *area,
+                    void *block, size_t block_size, void *args)
+{
+    PyObject *op = op_from_block(block, args, false);
+    if (op != NULL && _PyObject_HasDeferredRefcount(op)) {
+        _Py_SetImmortal(op);
+        op->ob_gc_bits &= ~_PyGC_BITS_DEFERRED;
+    }
+    return true;
+}
+
+void
+_PyGC_ImmortalizeDeferredObjects(PyInterpreterState *interp)
+{
+    struct visitor_args args;
+    _PyEval_StopTheWorld(interp);
+    gc_visit_heaps(interp, &immortalize_visitor, &args);
+    interp->gc.immortalize_deferred = 1;
+    _PyEval_StartTheWorld(interp);
+}
+
 void
 PyUnstable_GC_VisitObjects(gcvisitobjects_t callback, void *arg)
 {
diff --git a/Python/pystate.c b/Python/pystate.c
index a2f36c9f114cc36..cac051eef5e8c28 100644
--- a/Python/pystate.c
+++ b/Python/pystate.c
@@ -1566,6 +1566,13 @@ new_threadstate(PyInterpreterState *interp, int whence)
         // Must be called with lock unlocked to avoid re-entrancy deadlock.
         PyMem_RawFree(new_tstate);
     }
+    else {
+#ifdef Py_GIL_DISABLED
+        if (!interp->gc.immortalize_deferred) {
+            _PyGC_ImmortalizeDeferredObjects(interp);
+        }
+#endif
+    }
 
 #ifdef Py_GIL_DISABLED
     // Must be called with lock unlocked to avoid lock ordering deadlocks.
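
Taken together, the hunks above implement the gh-117783 workaround: the free-threaded (Py_GIL_DISABLED) build grows an interp->gc.immortalize_deferred flag, _PyGC_ImmortalizeDeferredObjects() walks the heaps during a stop-the-world pause and converts every deferred-refcount object to an immortal one, new_threadstate() triggers that pass the first time it finds the flag unset, and _PyObject_SetDeferredRefcount() immortalizes directly from then on. The sketch below models just that flag-plus-heap-walk pattern as a self-contained, single-threaded C program; every name in it (toy_object, IMMORTAL_REFCNT, GC_BITS_DEFERRED, and the function names) is a hypothetical stand-in rather than CPython's real API, and the stop-the-world synchronization is deliberately elided.

/*
 * Minimal stand-alone model of the pattern in this diff. All names here are
 * hypothetical stand-ins for CPython internals, not the real API.
 */
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define IMMORTAL_REFCNT  UINT32_MAX  /* sentinel: refcount ops become no-ops */
#define GC_BITS_DEFERRED 0x1         /* models _PyGC_BITS_DEFERRED */

typedef struct {
    uint32_t refcnt;
    uint8_t  gc_bits;
} toy_object;

/* Models interp->gc.immortalize_deferred: once set, objects that would get
 * deferred reference counting are immortalized instead. */
static int immortalize_deferred = 0;

static void
set_immortal(toy_object *op)
{
    op->refcnt = IMMORTAL_REFCNT;
}

/* Models _PyObject_SetDeferredRefcount() after this diff: check the flag
 * first, otherwise fall back to marking the object as deferred. */
static void
set_deferred_refcount(toy_object *op)
{
    if (immortalize_deferred) {
        set_immortal(op);
        return;
    }
    op->gc_bits |= GC_BITS_DEFERRED;
}

/* Models _PyGC_ImmortalizeDeferredObjects(): visit every object, convert the
 * deferred ones to immortal, then flip the flag so future objects skip the
 * deferred path entirely. In CPython the visit runs between
 * _PyEval_StopTheWorld() and _PyEval_StartTheWorld(); this single-threaded
 * toy needs no such pause. */
static void
immortalize_all_deferred(toy_object *heap, size_t n)
{
    for (size_t i = 0; i < n; i++) {
        if (heap[i].gc_bits & GC_BITS_DEFERRED) {
            set_immortal(&heap[i]);
            heap[i].gc_bits &= ~GC_BITS_DEFERRED;
        }
    }
    immortalize_deferred = 1;
}

int
main(void)
{
    toy_object heap[3] = {{1, 0}, {1, 0}, {1, 0}};

    set_deferred_refcount(&heap[0]);   /* before the pass: marked deferred */
    immortalize_all_deferred(heap, 3); /* models new_threadstate() firing */
    set_deferred_refcount(&heap[1]);   /* after the pass: immortalized */

    for (int i = 0; i < 3; i++) {
        printf("obj %d: refcnt=%" PRIu32 " deferred=%d\n", i,
               heap[i].refcnt, heap[i].gc_bits & GC_BITS_DEFERRED);
    }
    return 0;
}

The trade-off mirrors the diff itself: immortal objects are never reclaimed, so the workaround gives up memory and prompt finalization to avoid reference-count updates on these shared objects, which is why the diff's own comment describes it as a temporary way to work around scaling bottlenecks.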