Function location: //v8/src/heap/mark-compact.cc. MarkCompactCollector::Evacuate() covers two steps: evacuating pages in parallel and updating pointers afterwards:

```cpp
void MarkCompactCollector::Evacuate() {
  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE);

  {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
    EvacuatePrologue();
  }

  {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
    EvacuatePagesInParallel();
  }

  UpdatePointersAfterEvacuation();

  {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);

    for (PageMetadata* p : new_space_evacuation_pages_) {
      MemoryChunk* chunk = p->Chunk();
      AllocationSpace owner_identity = p->owner_identity();
      USE(owner_identity);
      if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION)) {
        chunk->ClearFlagNonExecutable(MemoryChunk::PAGE_NEW_OLD_PROMOTION);
        // The in-sandbox page flags may be corrupted, so we currently need
        // this check here to make sure that this doesn't lead to further
        // confusion about the state of MemoryChunkMetadata objects.
        // TODO(377724745): if we move (some of) the flags into the trusted
        // MemoryChunkMetadata object, then this wouldn't be necessary.
        SBXCHECK_EQ(OLD_SPACE, owner_identity);
        sweeper_->AddPage(OLD_SPACE, p);
      } else if (v8_flags.minor_ms) {
        // Sweep non-promoted pages to add them back to the free list.
        DCHECK_EQ(NEW_SPACE, owner_identity);
        DCHECK_EQ(0, p->live_bytes());
        DCHECK(p->SweepingDone());
        PagedNewSpace* space = heap_->paged_new_space();
        if (space->ShouldReleaseEmptyPage()) {
          ReleasePage(space->paged_space(), p);
        } else {
          sweeper_->SweepEmptyNewSpacePage(p);
        }
      }
    }
    new_space_evacuation_pages_.clear();

    for (LargePageMetadata* p : promoted_large_pages_) {
      MemoryChunk* chunk = p->Chunk();
      DCHECK(chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION));
      chunk->ClearFlagNonExecutable(MemoryChunk::PAGE_NEW_OLD_PROMOTION);
      Tagged<HeapObject> object = p->GetObject();
      if (!v8_flags.sticky_mark_bits) {
        MarkBit::From(object).Clear();
        p->SetLiveBytes(0);
      }
      p->marking_progress_tracker().ResetIfEnabled();
    }
    promoted_large_pages_.clear();

    for (PageMetadata* p : old_space_evacuation_pages_) {
      MemoryChunk* chunk = p->Chunk();
      if (chunk->IsFlagSet(MemoryChunk::COMPACTION_WAS_ABORTED)) {
        sweeper_->AddPage(p->owner_identity(), p);
        chunk->ClearFlagSlow(MemoryChunk::COMPACTION_WAS_ABORTED);
      }
    }
  }

  {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
    EvacuateEpilogue();
  }

#ifdef VERIFY_HEAP
  if (v8_flags.verify_heap && !sweeper_->sweeping_in_progress()) {
    EvacuationVerifier verifier(heap_);
    verifier.Run();
  }
#endif  // VERIFY_HEAP
}
```
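
Stripped of all the V8-specific bookkeeping, this phase boils down to the classic "copy live objects, leave forwarding pointers, then fix up references" scheme. The sketch below illustrates only that idea; `Object`, `EvacuatePage` and `UpdatePointers` are names invented for the illustration and are not V8 APIs.

```cpp
#include <memory>
#include <vector>

// Toy object: marked by the preceding marking phase, and given a
// forwarding pointer once it has been copied to its new location.
struct Object {
  bool marked = false;
  Object* forwarding = nullptr;
  int payload = 0;
};

// Step 1 (roughly MC_EVACUATE_COPY): copy live objects to the target page
// and record the new address at the old location.
void EvacuatePage(std::vector<std::unique_ptr<Object>>& from_page,
                  std::vector<std::unique_ptr<Object>>& to_page) {
  for (auto& obj : from_page) {
    if (!obj->marked) continue;  // dead objects are simply dropped
    to_page.push_back(std::make_unique<Object>(*obj));
    obj->forwarding = to_page.back().get();
  }
}

// Step 2 (roughly UpdatePointersAfterEvacuation): rewrite every remembered
// slot so that it points at the copy instead of the stale original.
void UpdatePointers(std::vector<Object**>& remembered_slots) {
  for (Object** slot : remembered_slots) {
    if (*slot != nullptr && (*slot)->forwarding != nullptr) {
      *slot = (*slot)->forwarding;
    }
  }
}
```

In V8 both steps run on multiple worker threads, and the "remembered slots" come from the remembered sets maintained by the write barrier, but the overall shape is the same.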

The MarkCompactCollector::EvacuatePagesInParallel() function is responsible for the evacuation step on individual memory pages:

```cpp
void MarkCompactCollector::EvacuatePagesInParallel() {
  std::vector<std::pair<ParallelWorkItem, MutablePageMetadata*>>
      evacuation_items;
  intptr_t live_bytes = 0;

  PinPreciseRootsIfNeeded();

  // Evacuation of new space pages cannot be aborted, so it needs to run
  // before old space evacuation.
  bool force_page_promotion =
      heap_->IsGCWithStack() && !v8_flags.compact_with_stack;
  for (PageMetadata* page : new_space_evacuation_pages_) {
    intptr_t live_bytes_on_page = page->live_bytes();
    DCHECK_LT(0, live_bytes_on_page);
    live_bytes += live_bytes_on_page;
    MemoryReductionMode memory_reduction_mode =
        heap_->ShouldReduceMemory() ? MemoryReductionMode::kShouldReduceMemory
                                    : MemoryReductionMode::kNone;
    if (ShouldMovePage(page, live_bytes_on_page, memory_reduction_mode) ||
        force_page_promotion || page->Chunk()->IsQuarantined()) {
      EvacuateNewToOldSpacePageVisitor::Move(page);
      page->Chunk()->SetFlagNonExecutable(MemoryChunk::PAGE_NEW_OLD_PROMOTION);
      DCHECK_EQ(heap_->old_space(), page->owner());
      // The move added page->allocated_bytes to the old space, but we are
      // going to sweep the page and add page->live_byte_count.
      heap_->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(), page);
    }
    evacuation_items.emplace_back(ParallelWorkItem{}, page);
  }

  if (heap_->IsGCWithStack()) {
    if (!v8_flags.compact_with_stack) {
      for (PageMetadata* page : old_space_evacuation_pages_) {
        ReportAbortedEvacuationCandidateDueToFlags(page, page->Chunk());
      }
    } else if (!v8_flags.compact_code_space_with_stack ||
               heap_->isolate()->InFastCCall()) {
      // For fast C calls we cannot patch the return address in the native
      // stack frame if we would relocate InstructionStream objects.
      for (PageMetadata* page : old_space_evacuation_pages_) {
        if (page->owner_identity() != CODE_SPACE) continue;
        ReportAbortedEvacuationCandidateDueToFlags(page, page->Chunk());
      }
    }
  } else {
    // There should always be a stack when we are in a fast c call.
    DCHECK(!heap_->isolate()->InFastCCall());
  }

  if (v8_flags.stress_compaction || v8_flags.stress_compaction_random) {
    // Stress aborting of evacuation by aborting ~10% of evacuation candidates
    // when stress testing.
    const double kFraction = 0.05;

    for (PageMetadata* page : old_space_evacuation_pages_) {
      if (heap_->isolate()->fuzzer_rng()->NextDouble() < kFraction) {
        ReportAbortedEvacuationCandidateDueToFlags(page, page->Chunk());
      }
    }
  }

  for (PageMetadata* page : old_space_evacuation_pages_) {
    MemoryChunk* chunk = page->Chunk();
    if (chunk->IsFlagSet(MemoryChunk::COMPACTION_WAS_ABORTED)) continue;

    live_bytes += page->live_bytes();
    evacuation_items.emplace_back(ParallelWorkItem{}, page);
  }

  // Promote young generation large objects.
  if (auto* new_lo_space = heap_->new_lo_space()) {
    for (auto it = new_lo_space->begin(); it != new_lo_space->end();) {
      LargePageMetadata* current = *(it++);
      Tagged<HeapObject> object = current->GetObject();
      // The black-allocated flag was already cleared in SweepLargeSpace().
      DCHECK_IMPLIES(v8_flags.black_allocated_pages,
                     !HeapLayout::InBlackAllocatedPage(object));
      if (marking_state_->IsMarked(object)) {
        heap_->lo_space()->PromoteNewLargeObject(current);
        current->Chunk()->SetFlagNonExecutable(
            MemoryChunk::PAGE_NEW_OLD_PROMOTION);
        promoted_large_pages_.push_back(current);
        evacuation_items.emplace_back(ParallelWorkItem{}, current);
      }
    }
    new_lo_space->set_objects_size(0);
  }

  const size_t pages_count = evacuation_items.size();
  size_t wanted_num_tasks = 0;
  if (!evacuation_items.empty()) {
    TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                 "MarkCompactCollector::EvacuatePagesInParallel", "pages",
                 evacuation_items.size());

    wanted_num_tasks = CreateAndExecuteEvacuationTasks(
        heap_, this, std::move(evacuation_items));
  }

  const size_t aborted_pages = PostProcessAbortedEvacuationCandidates();

  if (v8_flags.trace_evacuation) {
    TraceEvacuation(heap_->isolate(), pages_count, wanted_num_tasks, live_bytes,
                    aborted_pages);
  }
}
```
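
The interesting decision in the new-space loop above is whether to evacuate the surviving objects off a page one by one or to promote the entire page to old space (ShouldMovePage / force_page_promotion). Below is a minimal sketch of what such a heuristic can look like; `PageInfo`, `ShouldPromoteWholePage` and the 70% threshold are assumptions made up for the illustration, not the actual V8 logic.

```cpp
#include <cstddef>

// Hypothetical page descriptor: how many bytes survived marking versus the
// page's usable size. Both names are invented for this sketch.
struct PageInfo {
  size_t live_bytes;
  size_t usable_size;
};

// If most of a new-space page is still live, copying every survivor would be
// expensive and reclaim little memory, so it is cheaper to re-tag the whole
// page as old space ("page promotion"). The threshold is illustrative only.
bool ShouldPromoteWholePage(const PageInfo& page, bool should_reduce_memory) {
  if (should_reduce_memory) return false;  // prefer tightly packed copies
  const double kPromotionThreshold = 0.70;
  return static_cast<double>(page.live_bytes) >=
         kPromotionThreshold * static_cast<double>(page.usable_size);
}
```

Pages promoted this way are flagged with PAGE_NEW_OLD_PROMOTION and later handed to the sweeper in the clean-up phase of Evacuate(), as seen in the first listing.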