Project: Pyston
Commit 138c2ea1, authored Sep 18, 2015 by Kevin Modzelewski

Merge pull request #924 from kmod/gc_fixes

Gc fix

Parents: 8beb9b52, c9598857

Showing 10 changed files, with 204 additions and 64 deletions:
  src/asm_writing/icinfo.cpp                  +17   -0
  src/asm_writing/rewriter.cpp                 +5   -0
  src/asm_writing/rewriter.h                   +5   -0
  src/gc/collector.cpp                        +67  -16
  src/gc/gc_alloc.h                            +2   -0
  src/gc/heap.cpp                             +39  -27
  src/gc/heap.h                               +30  -20
  src/runtime/builtin_modules/builtins.cpp     +8   -1
  test/tests/class_and_instance_freeing.py    +14   -0
  test/tests/getattrfunc_rewriting.py         +17   -0
src/asm_writing/icinfo.cpp

@@ -84,6 +84,9 @@ uint8_t* ICSlotRewrite::getSlotStart() {
     return (uint8_t*)ic->start_addr + ic_entry->idx * ic->getSlotSize();
 }
 
+// Map of gc pointers -> number of ics that point to them.
+static llvm::DenseMap<void*, int> ic_gc_references;
+
 void ICSlotRewrite::commit(CommitHook* hook, std::vector<void*> gc_references) {
     bool still_valid = true;
     for (int i = 0; i < dependencies.size(); i++) {
@@ -120,7 +123,16 @@ void ICSlotRewrite::commit(CommitHook* hook, std::vector<void*> gc_references) {
     // if (VERBOSITY()) printf("Commiting to %p-%p\n", start, start + ic->slot_size);
     memcpy(slot_start, buf, ic->getSlotSize());
 
+    for (auto p : ic_entry->gc_references) {
+        int& i = ic_gc_references[p];
+        if (i == 1)
+            ic_gc_references.erase(p);
+        else
+            --i;
+    }
     ic_entry->gc_references = std::move(gc_references);
+    for (auto p : ic_entry->gc_references)
+        ic_gc_references[p]++;
 
     ic->times_rewritten++;
@@ -344,10 +356,15 @@ bool ICInfo::isMegamorphic() {
 }
 
 void ICInfo::visitGCReferences(gc::GCVisitor* v) {
+    for (auto&& p : ic_gc_references) {
+        v->visitNonRelocatable(p.first);
+    }
+
 #if MOVING_GC
     for (const auto& p : ics_list) {
         for (auto& slot : p->slots) {
             v->visitNonRelocatableRange(&slot.gc_references[0], &slot.gc_references[slot.gc_references.size()]);
         }
     }
 #endif
 }
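
Editor's note: the icinfo.cpp change is the heart of the fix. Inline caches embed raw pointers to GC objects in generated machine code, so the commit keeps a global counted map (ic_gc_references) that the collector visits as roots; an entry is erased only when no IC slot references that pointer anymore. Below is a minimal standalone sketch of that counted-root pattern, with std::unordered_map standing in for llvm::DenseMap. The CountedRootSet name and its methods are illustrative, not Pyston's.

#include <cassert>
#include <unordered_map>

class CountedRootSet {
    std::unordered_map<void*, int> counts;

public:
    // Called when a rewrite starts embedding a reference to obj.
    void add(void* obj) { counts[obj]++; }

    // Called when a rewrite is replaced; the entry is erased once no rewrite
    // references obj, so the GC stops treating it as a root.
    void remove(void* obj) {
        auto it = counts.find(obj);
        assert(it != counts.end());
        if (it->second == 1)
            counts.erase(it);
        else
            --it->second;
    }

    // The collector's root-visiting pass iterates the keys.
    template <typename Visitor> void visitRoots(Visitor&& visit) const {
        for (auto&& p : counts)
            visit(p.first);
    }
};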
src/asm_writing/rewriter.cpp

@@ -716,6 +716,11 @@ void Rewriter::_trap() {
     assembler->trap();
 }
 
+void Rewriter::addGCReference(void* obj) {
+    assert(gc::isValidGCObject(obj));
+    gc_references.push_back(obj);
+}
+
 RewriterVar* Rewriter::loadConst(int64_t val, Location dest) {
     STAT_TIMER(t0, "us_timer_rewriter", 10);
src/asm_writing/rewriter.h

@@ -554,6 +554,11 @@ public:
     const char* debugName() { return rewrite->debugName(); }
 
+    // Register that this rewrite will embed a reference to a particular gc object.
+    // TODO: come up with an implementation that is performant enough that we can automatically
+    // infer these from loadConst calls.
+    void addGCReference(void* obj);
+
     void trap();
     RewriterVar* loadConst(int64_t val, Location loc = Location::any());
 
     // has_side_effects: whether this call could have "side effects". the exact side effects we've
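
Editor's note: the TODO above says this pairing is not yet inferred automatically, so rewrites must do it by hand. A hypothetical call site (not from this commit; the function name and its use are illustrative) showing how addGCReference pairs with loadConst:

// A rewrite that bakes a pointer into machine code as an immediate must also
// register it, since the GC cannot scan generated code for references.
void embedCachedObject(Rewriter* rewriter, Box* cached) {
    RewriterVar* r_cached = rewriter->loadConst((int64_t)cached); // pointer becomes an immediate
    rewriter->addGCReference(cached); // keep `cached` alive while this rewrite is installed
    (void)r_cached;                   // a real rewrite would go on to use r_cached
}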
src/gc/collector.cpp

@@ -17,6 +17,7 @@
 #include <cassert>
 #include <cstdio>
 #include <cstdlib>
+#include <llvm/ADT/DenseSet.h>
 
 #include "asm_writing/icinfo.h"
 #include "codegen/ast_interpreter.h"
@@ -178,7 +179,7 @@ public:
     }
 
     void addWork(void* p) {
-        GC_TRACE_LOG("Pushing %p\n", p);
+        GC_TRACE_LOG("Pushing (%d) %p\n", visit_type, p);
         GCAllocation* al = GCAllocation::fromUserData(p);
 
         switch (visit_type) {
@@ -213,8 +214,10 @@ public:
             // http://pypy.readthedocs.org/en/latest/discussion/finalizer-order.html
             case TraversalType::FinalizationOrderingFindReachable:
                 if (orderingState(al) == FinalizationState::UNREACHABLE) {
+                    GC_TRACE_LOG("%p is now TEMPORARY\n", al->user_data);
                     setOrderingState(al, FinalizationState::TEMPORARY);
                 } else if (orderingState(al) == FinalizationState::REACHABLE_FROM_FINALIZER) {
+                    GC_TRACE_LOG("%p is now ALIVE\n", al->user_data);
                     setOrderingState(al, FinalizationState::ALIVE);
                 } else {
                     return;
@@ -222,6 +225,7 @@ public:
                 break;
             case TraversalType::FinalizationOrderingRemoveTemporaries:
                 if (orderingState(al) == FinalizationState::TEMPORARY) {
+                    GC_TRACE_LOG("%p is now REACHABLE_FROM_FINALIZER\n", al->user_data);
                     setOrderingState(al, FinalizationState::REACHABLE_FROM_FINALIZER);
                 } else {
                     return;
@@ -366,6 +370,7 @@ void registerPythonObject(Box* b) {
     assert(b->cls);
     if (hasOrderedFinalizer(b->cls)) {
+        GC_TRACE_LOG("%p is registered as having an ordered finalizer\n", b);
         objects_with_ordered_finalizers.push_back(b);
     }
 }
@@ -380,6 +385,7 @@ void invalidateOrderedFinalizerList() {
         if (!hasOrderedFinalizer(box->cls) || hasFinalized(al)) {
             // Cleanup.
+            GC_TRACE_LOG("Removing %p from objects_with_ordered_finalizers\n", box);
             iter = objects_with_ordered_finalizers.erase(iter);
         } else {
             ++iter;
@@ -540,8 +546,8 @@ static void visitRoots(GCVisitor& visitor) {
     }
 
     GC_TRACE_LOG("Looking at generated code pointers\n");
-#if MOVING_GC
     ICInfo::visitGCReferences(&visitor);
+#if MOVING_GC
     CompiledFunction::visitAllCompiledFunctions(&visitor);
 #endif
 }
@@ -554,8 +560,10 @@ static void finalizationOrderingFindReachable(Box* obj) {
     TraversalWorklist worklist(TraversalType::FinalizationOrderingFindReachable);
     GCVisitor visitor(&worklist);
 
+    GC_TRACE_LOG("findReachable %p\n", obj);
     worklist.addWork(obj);
     while (void* p = worklist.next()) {
+        GC_TRACE_LOG("findReachable, looking at %p\n", p);
         sc_marked_objs.log();
 
         visitByGCKind(p, visitor);
@@ -572,8 +580,10 @@ static void finalizationOrderingRemoveTemporaries(Box* obj) {
     TraversalWorklist worklist(TraversalType::FinalizationOrderingRemoveTemporaries);
     GCVisitor visitor(&worklist);
 
+    GC_TRACE_LOG("removeTemporaries %p\n", obj);
     worklist.addWork(obj);
     while (void* p = worklist.next()) {
+        GC_TRACE_LOG("removeTemporaries, looking at %p\n", p);
         GCAllocation* al = GCAllocation::fromUserData(p);
         assert(orderingState(al) != FinalizationState::UNREACHABLE);
         visitByGCKind(p, visitor);
@@ -611,6 +621,7 @@ static void orderFinalizers() {
         assert(state == FinalizationState::REACHABLE_FROM_FINALIZER || state == FinalizationState::ALIVE);
 
         if (state == FinalizationState::REACHABLE_FROM_FINALIZER) {
+            GC_TRACE_LOG("%p is now pending finalization\n", marked);
            pending_finalization_list.push_back(marked);
         }
     }
@@ -667,6 +678,8 @@ static void callPendingFinalizers() {
         Box* box = pending_finalization_list.front();
         pending_finalization_list.pop_front();
 
+        GC_TRACE_LOG("Running finalizer for %p\n", box);
+
         ASSERT(isValidGCObject(box), "objects to be finalized should still be alive");
 
         if (isWeaklyReferenced(box)) {
@@ -741,7 +754,7 @@ static void prepareWeakrefCallbacks(Box* box) {
     }
 }
 
-static void markPhase() {
+void markPhase() {
     static StatCounter sc_us("us_gc_mark_phase");
     Timer _t("markPhase", /*min_usec=*/10000);
@@ -775,13 +788,13 @@ static void markPhase() {
     sc_us.log(us);
 }
 
-static void sweepPhase(std::vector<Box*>& weakly_referenced) {
+static void sweepPhase(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) {
     static StatCounter sc_us("us_gc_sweep_phase");
     Timer _t("sweepPhase", /*min_usec=*/10000);
 
     // we need to use the allocator here because these objects are referenced only here, and calling the weakref
     // callbacks could start another gc
-    global_heap.freeUnmarked(weakly_referenced);
+    global_heap.freeUnmarked(weakly_referenced, classes_to_free);
 
     long us = _t.end();
     sc_us.log(us);
@@ -901,6 +914,24 @@ void endGCUnexpectedRegion() {
     should_not_reenter_gc = false;
 }
 
+#if TRACE_GC_MARKING
+static void openTraceFp(bool is_pre) {
+    if (trace_fp)
+        fclose(trace_fp);
+
+    char tracefn_buf[80];
+    snprintf(tracefn_buf, sizeof(tracefn_buf), "gc_trace_%d.%03d%s.txt", getpid(), ncollections + is_pre,
+             is_pre ? "_pre" : "");
+    trace_fp = fopen(tracefn_buf, "w");
+}
+
+static int _dummy() {
+    openTraceFp(true);
+    return 0;
+}
+static int _initializer = _dummy();
+#endif
+
 void runCollection() {
     static StatCounter sc_us("us_gc_collections");
     static StatCounter sc("gc_collections");
@@ -925,13 +956,7 @@ void runCollection() {
     Timer _t("collecting", /*min_usec=*/10000);
 
 #if TRACE_GC_MARKING
-#if 1 // separate log file per collection
-    char tracefn_buf[80];
-    snprintf(tracefn_buf, sizeof(tracefn_buf), "gc_trace_%d.%03d.txt", getpid(), ncollections);
-    trace_fp = fopen(tracefn_buf, "w");
-#else // overwrite previous log file with each collection
-    trace_fp = fopen("gc_trace.txt", "w");
-#endif
+    openTraceFp(false);
 #endif
 
     global_heap.prepareForCollection();
@@ -949,7 +974,15 @@ void runCollection() {
     // since the deallocation of other objects (namely, the weakref objects themselves) can affect
     // those lists, and we want to see the final versions.
     std::vector<Box*> weakly_referenced;
-    sweepPhase(weakly_referenced);
+
+    // Separately keep track of classes that we will be freeing in this collection.
+    // We want to make sure that any instances get freed before the class itself gets freed,
+    // since the freeing logic can look at the class object.
+    // So instead of directly freeing the classes, we stuff them into this vector and then
+    // free them at the end.
+    std::vector<BoxedClass*> classes_to_free;
+    sweepPhase(weakly_referenced, classes_to_free);
 
     // Handle weakrefs in two passes:
     // - first, find all of the weakref objects whose callbacks we need to call. we need to iterate
@@ -958,8 +991,27 @@ void runCollection() {
     // - the callbacks are called later, along with the finalizers
     for (auto o : weakly_referenced) {
         assert(isValidGCObject(o));
+        GC_TRACE_LOG("%p is weakly referenced\n", o);
         prepareWeakrefCallbacks(o);
-        global_heap.free(GCAllocation::fromUserData(o));
+
+        if (PyType_Check(o))
+            classes_to_free.push_back(static_cast<BoxedClass*>(o));
+        else
+            global_heap.free(GCAllocation::fromUserData(o));
     }
 
+    // We want to make sure that classes get freed before their metaclasses.
+    // Use a simple approach of only freeing one level of the hierarchy and then
+    // letting the next collection do the next one.
+    llvm::DenseSet<BoxedClass*> classes_to_not_free;
+    for (auto b : classes_to_free) {
+        classes_to_not_free.insert(b->cls);
+    }
+
+    for (auto b : classes_to_free) {
+        if (classes_to_not_free.count(b))
+            continue;
+        global_heap._setFree(GCAllocation::fromUserData(b));
+    }
+
     global_heap.cleanupAfterCollection();
@@ -969,8 +1021,7 @@ void runCollection() {
 #endif
 
 #if TRACE_GC_MARKING
-    fclose(trace_fp);
-    trace_fp = NULL;
+    openTraceFp(true);
 #endif
 
     should_not_reenter_gc = false; // end non-reentrant section
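
Editor's note: the classes_to_not_free logic above frees at most one level of the class/metaclass hierarchy per collection, so instances die before their class and classes before their metaclass (the metaclass waits for a later collection). A self-contained sketch of that deferral step, with std::unordered_set standing in for llvm::DenseSet; FakeClass and freeOneLevel are illustrative names:

#include <unordered_set>
#include <vector>

struct FakeClass {
    FakeClass* metaclass; // stand-in for b->cls
};

// Returns the classes actually freed this round; a class whose "children" are
// also dying this round is deferred, guaranteeing bottom-up freeing order.
std::vector<FakeClass*> freeOneLevel(const std::vector<FakeClass*>& classes_to_free) {
    std::unordered_set<FakeClass*> classes_to_not_free;
    for (FakeClass* c : classes_to_free)
        classes_to_not_free.insert(c->metaclass);

    std::vector<FakeClass*> freed;
    for (FakeClass* c : classes_to_free) {
        if (classes_to_not_free.count(c))
            continue; // some dying class still points at c; wait a collection
        freed.push_back(c); // the real code calls global_heap._setFree(...) here
    }
    return freed;
}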
src/gc/gc_alloc.h

@@ -112,6 +112,8 @@ extern "C" inline void* gc_alloc(size_t bytes, GCKind kind_id) {
     // if (VERBOSITY()) printf("Allocated %ld bytes at [%p, %p)\n", bytes, r, (char*)r + bytes);
 #endif
 
+    GC_TRACE_LOG("Allocated %p\n", r);
+
 #if STAT_ALLOCATIONS
     gc_alloc_bytes.log(alloc_bytes);
     gc_alloc_bytes_typed[(int)kind_id].log(alloc_bytes);
src/gc/heap.cpp

@@ -45,7 +45,7 @@ template <> void return_temporary_buffer<pyston::Box*>(pyston::Box** p) {
 namespace pyston {
 namespace gc {
 
-bool _doFree(GCAllocation* al, std::vector<Box*>* weakly_referenced);
+bool _doFree(GCAllocation* al, std::vector<Box*>* weakly_referenced, std::vector<BoxedClass*>* classes_to_free);
 
 // lots of linked lists around here, so let's just use template functions for operations on them.
 template <class ListT> inline void nullNextPrev(ListT* node) {
@@ -88,7 +88,8 @@ template <class ListT, typename Func> inline void forEach(ListT* list, Func func
 }
 
 template <class ListT, typename Free>
-inline void sweepList(ListT* head, std::vector<Box*>& weakly_referenced, Free free_func) {
+inline void sweepList(ListT* head, std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free,
+                      Free free_func) {
     auto cur = head;
     while (cur) {
         GCAllocation* al = cur->data;
@@ -97,7 +98,7 @@ inline void sweepList(ListT* head, std::vector<Box*>& weakly_referenced, Free fr
             clearMark(al);
             cur = cur->next;
         } else {
-            if (_doFree(al, &weakly_referenced)) {
+            if (_doFree(al, &weakly_referenced, &classes_to_free)) {
                 removeFromLL(cur);
 
                 auto to_free = cur;
@@ -128,12 +129,7 @@ void _bytesAllocatedTripped() {
 /// Finalizers
 
 bool hasOrderedFinalizer(BoxedClass* cls) {
-    if (PyType_FastSubclass(cls, Py_TPFLAGS_TYPE_SUBCLASS)) {
-        // Class objects need to be kept alive for at least as long as any objects that point
-        // to them, even if those objects are garbage (or involved in finalizer chains).
-        // Marking class objects as having finalizers should get us this behavior.
-        return true;
-    } else if (cls->has_safe_tp_dealloc) {
+    if (cls->has_safe_tp_dealloc) {
         ASSERT(!cls->tp_del, "class \"%s\" with safe tp_dealloc also has tp_del?", cls->tp_name);
         return false;
     } else if (cls->hasNonDefaultTpDealloc()) {
@@ -164,9 +160,10 @@ __attribute__((always_inline)) bool isWeaklyReferenced(Box* b) {
 
 Heap global_heap;
 
-__attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>* weakly_referenced) {
-    static StatCounter gc_safe_destructors("gc_safe_destructor_calls");
+static StatCounter gc_safe_destructors("gc_safe_destructor_calls");
 
+__attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>* weakly_referenced,
+                                            std::vector<BoxedClass*>* classes_to_free) {
 #ifndef NVALGRIND
     VALGRIND_DISABLE_ERROR_REPORTING;
 #endif
@@ -175,6 +172,7 @@ __attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>*
     VALGRIND_ENABLE_ERROR_REPORTING;
 #endif
 
+    GC_TRACE_LOG("doFree %p\n", al->user_data);
     if (alloc_kind == GCKind::PYTHON) {
 #ifndef NVALGRIND
         VALGRIND_DISABLE_ERROR_REPORTING;
@@ -185,15 +183,24 @@ __attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>*
 #endif
         assert(b->cls);
 
-        if (isWeaklyReferenced(b)) {
+        if (unlikely(isWeaklyReferenced(b))) {
             assert(weakly_referenced && "attempting to free a weakly referenced object manually");
             weakly_referenced->push_back(b);
+            GC_TRACE_LOG("%p is weakly referenced\n", al->user_data);
             return false;
         }
 
         ASSERT(!hasOrderedFinalizer(b->cls) || hasFinalized(al), "%s", getTypeName(b));
 
+        // Note: do this check after the weakrefs check.
+        if (unlikely(PyType_Check(b))) {
+            assert(classes_to_free);
+            classes_to_free->push_back(static_cast<BoxedClass*>(b));
+            return false;
+        }
+
         if (b->cls->tp_dealloc != dealloc_null && b->cls->has_safe_tp_dealloc) {
+            GC_TRACE_LOG("running safe destructor for %p\n", b);
             gc_safe_destructors.log();
 
             GCAllocation* al = GCAllocation::fromUserData(b);
@@ -208,7 +215,7 @@ __attribute__((always_inline)) bool _doFree(GCAllocation* al, std::vector<Box*>*
 }
 
 void Heap::destructContents(GCAllocation* al) {
-    _doFree(al, NULL);
+    _doFree(al, NULL, NULL);
 }
 
 struct HeapStatistics {
@@ -481,11 +488,15 @@ void SmallArena::forEachReference(std::function<void(GCAllocation*, size_t)> f)
     }
 }
 
-void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced) {
+void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) {
     assertConsistent();
 
-    thread_caches.forEachValue([this, &weakly_referenced](ThreadBlockCache* cache) {
-        for (int bidx = 0; bidx < NUM_BUCKETS; bidx++) {
+    thread_caches.forEachValue([this, &weakly_referenced, &classes_to_free](ThreadBlockCache* cache) {
+        // Iterate over the buckets from largest to smallest. I don't think it really matters, but
+        // doing it in this order means that we will tend to free types early in the sweep (since they
+        // are fairly large), and we are more likely to detect if other code depended on that type
+        // being alive.
+        for (int bidx = NUM_BUCKETS - 1; bidx >= 0; bidx--) {
             Block* h = cache->cache_free_heads[bidx];
 
             // Try to limit the amount of unused memory a thread can hold onto;
             // currently pretty dumb, just limit the number of blocks in the free-list
@@ -504,8 +515,8 @@ void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced) {
                 insertIntoLL(&heads[bidx], h);
             }
 
-            Block** chain_end = _freeChain(&cache->cache_free_heads[bidx], weakly_referenced);
-            _freeChain(&cache->cache_full_heads[bidx], weakly_referenced);
+            Block** chain_end = _freeChain(&cache->cache_free_heads[bidx], weakly_referenced, classes_to_free);
+            _freeChain(&cache->cache_full_heads[bidx], weakly_referenced, classes_to_free);
 
             while (Block* b = cache->cache_full_heads[bidx]) {
                 removeFromLLAndNull(b);
@@ -514,9 +525,9 @@ void SmallArena::freeUnmarked(std::vector<Box*>& weakly_referenced) {
         }
     });
 
-    for (int bidx = 0; bidx < NUM_BUCKETS; bidx++) {
-        Block** chain_end = _freeChain(&heads[bidx], weakly_referenced);
-        _freeChain(&full_heads[bidx], weakly_referenced);
+    for (int bidx = NUM_BUCKETS - 1; bidx >= 0; bidx--) {
+        Block** chain_end = _freeChain(&heads[bidx], weakly_referenced, classes_to_free);
+        _freeChain(&full_heads[bidx], weakly_referenced, classes_to_free);
 
         while (Block* b = full_heads[bidx]) {
             removeFromLLAndNull(b);
@@ -543,7 +554,8 @@ void SmallArena::getStatistics(HeapStatistics* stats) {
 }
 
-SmallArena::Block** SmallArena::_freeChain(Block** head, std::vector<Box*>& weakly_referenced) {
+SmallArena::Block** SmallArena::_freeChain(Block** head, std::vector<Box*>& weakly_referenced,
+                                           std::vector<BoxedClass*>& classes_to_free) {
     while (Block* b = *head) {
         int num_objects = b->numObjects();
         int first_obj = b->minObjIndex();
@@ -568,7 +580,7 @@ SmallArena::Block** SmallArena::_freeChain(Block** head, std::vector<Box*>& weak
             if (isMarked(al)) {
                 clearMark(al);
             } else {
-                if (_doFree(al, &weakly_referenced)) {
+                if (_doFree(al, &weakly_referenced, &classes_to_free)) {
                     // GC_TRACE_LOG("freeing %p\n", al->user_data);
                     b->isfree.set(atom_idx);
 #ifndef NDEBUG
@@ -807,8 +819,8 @@ void LargeArena::cleanupAfterCollection() {
     lookup.clear();
 }
 
-void LargeArena::freeUnmarked(std::vector<Box*>& weakly_referenced) {
-    sweepList(head, weakly_referenced, [this](LargeObj* ptr) { _freeLargeObj(ptr); });
+void LargeArena::freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) {
+    sweepList(head, weakly_referenced, classes_to_free, [this](LargeObj* ptr) { _freeLargeObj(ptr); });
 }
 
 void LargeArena::getStatistics(HeapStatistics* stats) {
@@ -1016,8 +1028,8 @@ void HugeArena::cleanupAfterCollection() {
     lookup.clear();
 }
 
-void HugeArena::freeUnmarked(std::vector<Box*>& weakly_referenced) {
-    sweepList(head, weakly_referenced, [this](HugeObj* ptr) { _freeHugeObj(ptr); });
+void HugeArena::freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) {
+    sweepList(head, weakly_referenced, classes_to_free, [this](HugeObj* ptr) { _freeHugeObj(ptr); });
 }
 
 void HugeArena::getStatistics(HeapStatistics* stats) {
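
Editor's note: the sweepList helper threaded through these changes is a standard sweep over an intrusive free-list: keep marked nodes (clearing their mark bit for the next cycle), unlink and free the rest. A minimal sketch of that pattern, under assumed Node/sweep names:

struct Node {
    Node* next;
    bool marked;
};

template <typename FreeFunc> void sweep(Node*& head, FreeFunc free_func) {
    // Walk via a pointer-to-link so unlinking works uniformly at the head
    // and in the middle of the list.
    Node** link = &head;
    while (Node* cur = *link) {
        if (cur->marked) {
            cur->marked = false; // survivor; reset the mark for the next collection
            link = &cur->next;
        } else {
            *link = cur->next; // unlink before freeing
            free_func(cur);    // the real code also consults _doFree() first
        }
    }
}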
src/gc/heap.h

@@ -272,7 +272,7 @@ public:
     void free(GCAllocation* al);
 
     GCAllocation* allocationFrom(void* ptr);
-    void freeUnmarked(std::vector<Box*>& weakly_referenced);
+    void freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free);
 
     void getStatistics(HeapStatistics* stats);
@@ -414,7 +414,7 @@ private:
     Block* _allocBlock(uint64_t size, Block** prev);
     GCAllocation* _allocFromBlock(Block* b);
     Block* _claimBlock(size_t rounded_size, Block** free_head);
-    Block** _freeChain(Block** head, std::vector<Box*>& weakly_referenced);
+    Block** _freeChain(Block** head, std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free);
     void _getChainStatistics(HeapStatistics* stats, Block** head);
 
     GCAllocation* _alloc(size_t bytes, int bucket_idx);
@@ -496,7 +496,7 @@ public:
     void free(GCAllocation* alloc);
 
     GCAllocation* allocationFrom(void* ptr);
-    void freeUnmarked(std::vector<Box*>& weakly_referenced);
+    void freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free);
 
     void getStatistics(HeapStatistics* stats);
@@ -517,7 +517,7 @@ public:
     void free(GCAllocation* alloc);
 
     GCAllocation* allocationFrom(void* ptr);
-    void freeUnmarked(std::vector<Box*>& weakly_referenced);
+    void freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free);
 
     void getStatistics(HeapStatistics* stats);
@@ -623,18 +623,7 @@ public:
     void free(GCAllocation* alloc) {
         destructContents(alloc);
 
-        if (large_arena.contains(alloc)) {
-            large_arena.free(alloc);
-            return;
-        }
-
-        if (huge_arena.contains(alloc)) {
-            huge_arena.free(alloc);
-            return;
-        }
-
-        assert(small_arena.contains(alloc));
-        small_arena.free(alloc);
+        _setFree(alloc);
     }
 
     // not thread safe:
@@ -654,10 +643,10 @@ public:
     void forEachSmallArenaReference(std::function<void(GCAllocation*, size_t)> f) { small_arena.forEachReference(f); }
 
     // not thread safe:
-    void freeUnmarked(std::vector<Box*>& weakly_referenced) {
-        small_arena.freeUnmarked(weakly_referenced);
-        large_arena.freeUnmarked(weakly_referenced);
-        huge_arena.freeUnmarked(weakly_referenced);
+    void freeUnmarked(std::vector<Box*>& weakly_referenced, std::vector<BoxedClass*>& classes_to_free) {
+        small_arena.freeUnmarked(weakly_referenced, classes_to_free);
+        large_arena.freeUnmarked(weakly_referenced, classes_to_free);
+        huge_arena.freeUnmarked(weakly_referenced, classes_to_free);
     }
 
     void prepareForCollection() {
@@ -673,6 +662,27 @@ public:
     }
 
     void dumpHeapStatistics(int level);
 
 private:
+    // Internal function that just marks the allocation as being freed, without doing any
+    // Python-semantics on it.
+    void _setFree(GCAllocation* alloc) {
+        if (large_arena.contains(alloc)) {
+            large_arena.free(alloc);
+            return;
+        }
+
+        if (huge_arena.contains(alloc)) {
+            huge_arena.free(alloc);
+            return;
+        }
+
+        assert(small_arena.contains(alloc));
+        small_arena.free(alloc);
+    }
+
     friend void markPhase();
     friend void runCollection();
 };
 
 extern Heap global_heap;
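
Editor's note: this refactor splits the old Heap::free into a public path (teardown, then release) and a private _setFree that only returns memory to whichever arena owns it, which is what runCollection needs for deferred classes whose teardown already ran. A rough sketch of that split, with every name (MiniHeap, Arena, setFreeOnly) illustrative and the arena methods stubbed out:

struct Arena {
    bool owns(void* p) const { (void)p; return false; } // real arenas test address ranges
    void release(void* p) { (void)p; }                  // real arenas return p to a free list
};

struct MiniHeap {
    Arena small, large, huge;

    void destructContents(void* p) { (void)p; } // Python-semantics teardown (weakrefs, dealloc, ...)

    // Public path: full teardown, then give the memory back.
    void free(void* p) {
        destructContents(p);
        setFreeOnly(p);
    }

    // Memory-only path (the commit's private _setFree): used when teardown
    // already happened, or must not run again.
    void setFreeOnly(void* p) {
        if (large.owns(p)) {
            large.release(p);
            return;
        }
        if (huge.owns(p)) {
            huge.release(p);
            return;
        }
        small.release(p);
    }
};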
src/runtime/builtin_modules/builtins.cpp

@@ -532,8 +532,15 @@ Box* getattrFuncInternal(BoxedFunctionBase* func, CallRewriteArgs* rewrite_args,
         // value is fixed.
         if (!PyString_CheckExact(_str) && !PyUnicode_CheckExact(_str))
             rewrite_args = NULL;
-        else
+        else {
+            if (PyString_CheckExact(_str) && PyString_CHECK_INTERNED(_str) == SSTATE_INTERNED_IMMORTAL) {
+                // can avoid keeping the extra gc reference
+            } else {
+                rewrite_args->rewriter->addGCReference(_str);
+            }
+
             rewrite_args->arg2->addGuard((intptr_t)arg2);
+        }
     }
 
     try {
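
Editor's note: the builtins.cpp change skips rooting strings that CPython has interned immortally, since those can never be collected anyway. A sketch of that decision factored into a helper; the maybeAddGCReference name is hypothetical, while the interning macros are the CPython 2.x C API used above:

// An immortally-interned string lives for the life of the process, so a
// rewrite that embeds it does not need to register a GC reference for it.
static void maybeAddGCReference(Rewriter* rewriter, PyObject* str) {
    if (PyString_CheckExact(str) && PyString_CHECK_INTERNED(str) == SSTATE_INTERNED_IMMORTAL)
        return; // immortal: no root needed
    rewriter->addGCReference(str);
}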
test/tests/class_and_instance_freeing.py (new file, mode 100644)

import gc

l = []

for i in xrange(5100):
    class C(object):
        pass

    C.l = [C() for _ in xrange(17)]

    if i % 10 == 0:
        print i

    # gc.collect()

    # for i in xrange(100):
    #     l.append('=' * i)
test/tests/getattrfunc_rewriting.py (new file, mode 100644)

# Regression test: make sure we guard / invalidate our getattr() rewrites:

class C(object):
    pass

c = C()
for i in xrange(100):
    setattr(c, "attr%d" % i, i)

def f():
    for j in xrange(2, 10):
        t = 0
        for i in xrange(2000):
            for k in xrange(j):
                t += getattr(c, "attr%d" % k)
        print t
f()