60 changes: 39 additions & 21 deletions Include/object.h
@@ -969,26 +969,26 @@ times.

When deallocating a container object, it's possible to trigger an unbounded
chain of deallocations, as each Py_DECREF in turn drops the refcount on "the
next" object in the chain to 0. This can easily lead to stack faults, and
next" object in the chain to 0. This can easily lead to stack overflows,
especially in threads (which typically have less stack space to work with).

A container object that participates in cyclic gc can avoid this by
bracketing the body of its tp_dealloc function with a pair of macros:
A container object can avoid this by bracketing the body of its tp_dealloc
function with a pair of macros:

static void
mytype_dealloc(mytype *p)
{
... declarations go here ...

PyObject_GC_UnTrack(p); // must untrack first
Py_TRASHCAN_SAFE_BEGIN(p)
Py_TRASHCAN_BEGIN(p, mytype_dealloc)
... The body of the deallocator goes here, including all calls ...
... to Py_DECREF on contained objects. ...
Py_TRASHCAN_SAFE_END(p)
Py_TRASHCAN_END // there should be no code after this
}

CAUTION: Never return from the middle of the body! If the body needs to
"get out early", put a label immediately before the Py_TRASHCAN_SAFE_END
"get out early", put a label immediately before the Py_TRASHCAN_END
call, and goto it. Else the call-depth counter (see below) will stay
above 0 forever, and the trashcan will never get emptied.
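For instance, a minimal sketch of that early-exit pattern (the "done" label,
the item member and keep_on_free_list() are illustrative only; compare the
real tupledealloc further down in this patch):

    static void
    mytype_dealloc(mytype *p)
    {
        PyObject_GC_UnTrack(p);
        Py_TRASHCAN_BEGIN(p, mytype_dealloc)
        Py_CLEAR(p->item);                    // drop contained objects
        if (keep_on_free_list(p))             // hypothetical early exit: the
            goto done;                        // object was stashed, not freed
        Py_TYPE(p)->tp_free((PyObject *)p);
    done:
        Py_TRASHCAN_END                       // no code after this
    }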

@@ -1004,6 +1004,12 @@ notices this, and calls another routine to deallocate all the objects that
may have been added to the list of deferred deallocations. In effect, a
chain of N deallocations is broken into N / PyTrash_UNWIND_LEVEL pieces,
with the call stack never exceeding a depth of PyTrash_UNWIND_LEVEL.
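(For example, with PyTrash_UNWIND_LEVEL at its value of 50 below, a chain of
one million deallocations is handled in roughly 20,000 pieces, none of which
nests more than 50 calls deep.)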

Since the tp_dealloc of a subclass typically calls the tp_dealloc of the base
class, we need to ensure that the trashcan is only triggered on the tp_dealloc
of the actual class being deallocated. Otherwise we might end up with a
partially-deallocated object. To check this, the tp_dealloc function must be
passed as second argument to Py_TRASHCAN_BEGIN().
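As a sketch (the mysubtype struct and its extra member are illustrative only),
a subtype deriving from mytype passes its own dealloc function, so the
trashcan is armed only once, at the outermost call:

    static void
    mysubtype_dealloc(mysubtype *p)
    {
        PyObject_GC_UnTrack(p);
        Py_TRASHCAN_BEGIN(p, mysubtype_dealloc)
        Py_CLEAR(p->extra);               // members added by the subclass
        mytype_dealloc((mytype *)p);      // the base class's own
                                          // Py_TRASHCAN_BEGIN(p, mytype_dealloc)
                                          // sees tp_dealloc != mytype_dealloc
                                          // and stays out of the way
        Py_TRASHCAN_END
    }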
*/

/* This is the old private API, invoked by the macros before 2.7.4.
@@ -1020,26 +1026,38 @@ PyAPI_FUNC(void) _PyTrash_thread_destroy_chain(void);
#define PyTrash_UNWIND_LEVEL 50

/* Note the workaround for when the thread state is NULL (issue #17703) */
#define Py_TRASHCAN_SAFE_BEGIN(op) \
#define Py_TRASHCAN_BEGIN_CONDITION(op, cond) \
do { \
PyThreadState *_tstate = PyThreadState_GET(); \
if (!_tstate || \
_tstate->trash_delete_nesting < PyTrash_UNWIND_LEVEL) { \
if (_tstate) \
++_tstate->trash_delete_nesting;
/* The body of the deallocator is here. */
#define Py_TRASHCAN_SAFE_END(op) \
if (_tstate) { \
--_tstate->trash_delete_nesting; \
if (_tstate->trash_delete_later \
&& _tstate->trash_delete_nesting <= 0) \
_PyTrash_thread_destroy_chain(); \
PyThreadState *_tstate = NULL; \
/* If "cond" is false, then _tstate remains NULL and the deallocator \
* is run normally without involving the trashcan */ \
if (cond && (_tstate = PyThreadState_GET()) != NULL) { \
if (_tstate->trash_delete_nesting >= PyTrash_UNWIND_LEVEL) { \
/* Store the object (to be deallocated later) and jump past \
* Py_TRASHCAN_END, skipping the body of the deallocator */ \
_PyTrash_thread_deposit_object((PyObject*)op); \
break; \
} \
++_tstate->trash_delete_nesting; \
}
/* The body of the deallocator is here. */
#define Py_TRASHCAN_END \
if (_tstate) { \
--_tstate->trash_delete_nesting; \
if (_tstate->trash_delete_later && _tstate->trash_delete_nesting <= 0) \
_PyTrash_thread_destroy_chain(); \
} \
else \
_PyTrash_thread_deposit_object((PyObject*)op); \
} while (0);

#define Py_TRASHCAN_BEGIN(op, dealloc) Py_TRASHCAN_BEGIN_CONDITION(op, \
Py_TYPE(op)->tp_dealloc == (destructor)(dealloc))

/* For backwards compatibility, these macros enable the trashcan
* unconditionally */
#define Py_TRASHCAN_SAFE_BEGIN(op) Py_TRASHCAN_BEGIN_CONDITION(op, 1)
#define Py_TRASHCAN_SAFE_END(op) Py_TRASHCAN_END


#ifdef __cplusplus
}
#endif
35 changes: 35 additions & 0 deletions Lib/test/test_capi.py
@@ -22,6 +22,41 @@ class CAPITest(unittest.TestCase):
def test_buildvalue_N(self):
_testcapi.test_buildvalue_N()

def test_trashcan_subclass(self):
# bpo-35983: Check that the trashcan mechanism for "list" is NOT
# activated when its tp_dealloc is being called by a subclass
from _testcapi import MyList
L = None
for i in range(1000):
L = MyList((L,))

def test_trashcan_python_class(self):
# Check that the trashcan mechanism works properly for a Python
# subclass of a class using the trashcan (list in this test)
class PyList(list):
# Count the number of PyList instances to verify that there is
# no memory leak
num = 0
def __init__(self, *args):
PyList.num += 1
list.__init__(self, *args)
def __del__(self):
PyList.num -= 1

for parity in (0, 1):
L = None
# We need in the order of 2**20 iterations here such that a
# typical 8MB stack would overflow without the trashcan.
for i in range(2**20):
L = PyList((L,))
L.attr = i
if parity:
# Add one additional nesting layer
L = (L,)
self.assertGreater(PyList.num, 0)
del L
self.assertEqual(PyList.num, 0)


@unittest.skipUnless(threading, 'Threading required for this test.')
class TestPendingCalls(unittest.TestCase):
18 changes: 18 additions & 0 deletions Lib/test/test_gc.py
@@ -307,6 +307,24 @@ def __del__(self):
v = {1: v, 2: Ouch()}
gc.disable()

def test_no_double_del(self):
# bpo-36556: instances of heap types should be deallocated once,
# even if the trashcan and __del__ are involved
class ObjectCounter(object):
count = 0
def __init__(self):
type(self).count += 1
def __del__(self):
# create temporary involving self, whose deallocation
# uses the trashcan
L = [self]
type(self).count -= 1
L = None
for i in range(10000):
L = (L, ObjectCounter())
del L
self.assertEqual(ObjectCounter.count, 0)

@unittest.skipUnless(threading, "test meaningless on builds without threads")
def test_trashcan_threads(self):
# Issue #13992: trashcan mechanism should be thread-safe
@@ -0,0 +1,3 @@
Added new trashcan macros to deal with a double deallocation that could occur
when the `tp_dealloc` of a subclass calls the `tp_dealloc` of a base class
and that base class uses the trashcan mechanism. Patch by Jeroen Demeyer.
@@ -0,0 +1,2 @@
When deleting highly nested objects (where the trashcan mechanism is
involved), it is less likely that ``__del__`` is called multiple times.
76 changes: 76 additions & 0 deletions Modules/_testcapimodule.c
@@ -2944,6 +2944,76 @@ static PyTypeObject test_structmembersType = {
};


/* Test bpo-35983: create a subclass of "list" which checks that instances
* are not deallocated twice */

typedef struct {
PyListObject list;
int deallocated;
} MyListObject;

static PyObject *
MyList_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
PyObject* op = PyList_Type.tp_new(type, args, kwds);
((MyListObject*)op)->deallocated = 0;
return op;
}

void
MyList_dealloc(MyListObject* op)
{
if (op->deallocated) {
/* We cannot raise exceptions here but we still want the testsuite
* to fail when we hit this */
Py_FatalError("MyList instance deallocated twice");
}
op->deallocated = 1;
PyList_Type.tp_dealloc((PyObject *)op);
}

static PyTypeObject MyList_Type = {
PyVarObject_HEAD_INIT(NULL, 0)
"MyList",
sizeof(MyListObject),
0,
(destructor)MyList_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_reserved */
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
0, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
0, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
0, /* tp_methods */
0, /* tp_members */
0, /* tp_getset */
0, /* &PyList_Type */ /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
0, /* tp_init */
0, /* tp_alloc */
MyList_new, /* tp_new */
};


PyMODINIT_FUNC
init_testcapi(void)
{
@@ -2961,6 +3031,12 @@ init_testcapi(void)
test_capi to automatically call this */
PyModule_AddObject(m, "_test_structmembersType", (PyObject *)&test_structmembersType);

MyList_Type.tp_base = &PyList_Type;
if (PyType_Ready(&MyList_Type) < 0)
return;
Py_INCREF(&MyList_Type);
PyModule_AddObject(m, "MyList", (PyObject *)&MyList_Type);

PyModule_AddObject(m, "CHAR_MAX", PyInt_FromLong(CHAR_MAX));
PyModule_AddObject(m, "CHAR_MIN", PyInt_FromLong(CHAR_MIN));
PyModule_AddObject(m, "UCHAR_MAX", PyInt_FromLong(UCHAR_MAX));
4 changes: 2 additions & 2 deletions Objects/descrobject.c
@@ -940,11 +940,11 @@ static void
wrapper_dealloc(wrapperobject *wp)
{
PyObject_GC_UnTrack(wp);
Py_TRASHCAN_SAFE_BEGIN(wp)
Py_TRASHCAN_BEGIN(wp, wrapper_dealloc)
Py_XDECREF(wp->descr);
Py_XDECREF(wp->self);
PyObject_GC_Del(wp);
Py_TRASHCAN_SAFE_END(wp)
Py_TRASHCAN_END
}

static int
4 changes: 2 additions & 2 deletions Objects/dictobject.c
@@ -1078,7 +1078,7 @@ dict_dealloc(register PyDictObject *mp)
Py_ssize_t fill = mp->ma_fill;
/* bpo-31095: UnTrack is needed before calling any callbacks */
PyObject_GC_UnTrack(mp);
Py_TRASHCAN_SAFE_BEGIN(mp)
Py_TRASHCAN_BEGIN(mp, dict_dealloc)
for (ep = mp->ma_table; fill > 0; ep++) {
if (ep->me_key) {
--fill;
@@ -1092,7 +1092,7 @@ dict_dealloc(register PyDictObject *mp)
free_list[numfree++] = mp;
else
Py_TYPE(mp)->tp_free((PyObject *)mp);
Py_TRASHCAN_SAFE_END(mp)
Py_TRASHCAN_END
}

static int
4 changes: 2 additions & 2 deletions Objects/listobject.c
@@ -298,7 +298,7 @@ list_dealloc(PyListObject *op)
{
Py_ssize_t i;
PyObject_GC_UnTrack(op);
Py_TRASHCAN_SAFE_BEGIN(op)
Py_TRASHCAN_BEGIN(op, list_dealloc)
if (op->ob_item != NULL) {
/* Do it backwards, for Christian Tismer.
There's a simple test case where somehow this reduces
@@ -314,7 +314,7 @@ list_dealloc(PyListObject *op)
free_list[numfree++] = op;
else
Py_TYPE(op)->tp_free((PyObject *)op);
Py_TRASHCAN_SAFE_END(op)
Py_TRASHCAN_END
}

static int
4 changes: 2 additions & 2 deletions Objects/setobject.c
@@ -551,7 +551,7 @@ set_dealloc(PySetObject *so)
Py_ssize_t fill = so->fill;
/* bpo-31095: UnTrack is needed before calling any callbacks */
PyObject_GC_UnTrack(so);
Py_TRASHCAN_SAFE_BEGIN(so)
Py_TRASHCAN_BEGIN(so, set_dealloc)
if (so->weakreflist != NULL)
PyObject_ClearWeakRefs((PyObject *) so);

@@ -567,7 +567,7 @@ set_dealloc(PySetObject *so)
free_list[numfree++] = so;
else
Py_TYPE(so)->tp_free(so);
Py_TRASHCAN_SAFE_END(so)
Py_TRASHCAN_END
}

static int
4 changes: 2 additions & 2 deletions Objects/tupleobject.c
@@ -215,7 +215,7 @@ tupledealloc(register PyTupleObject *op)
register Py_ssize_t i;
register Py_ssize_t len = Py_SIZE(op);
PyObject_GC_UnTrack(op);
Py_TRASHCAN_SAFE_BEGIN(op)
Py_TRASHCAN_BEGIN(op, tupledealloc)
if (len > 0) {
i = len;
while (--i >= 0)
@@ -234,7 +234,7 @@ tupledealloc(register PyTupleObject *op)
}
Py_TYPE(op)->tp_free((PyObject *)op);
done:
Py_TRASHCAN_SAFE_END(op)
Py_TRASHCAN_END
}

static int