for (PageMetadata* p : new_space_evacuation_pages_) {
    MemoryChunk* chunk = p->Chunk();
    AllocationSpace owner_identity = p->owner_identity();
    USE(owner_identity);
    if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION)) {
      chunk->ClearFlagNonExecutable(MemoryChunk::PAGE_NEW_OLD_PROMOTION);
      // The in-sandbox page flags may be corrupted, so we currently need
      // this check here to make sure that this doesn't lead to further
      // confusion about the state of MemoryChunkMetadata objects.
      // TODO(377724745): if we move (some of) the flags into the trusted
      // MemoryChunkMetadata object, then this wouldn't be necessary.
      SBXCHECK_EQ(OLD_SPACE, owner_identity);
      sweeper_->AddPage(OLD_SPACE, p);
    } else if (v8_flags.minor_ms) {
      // Sweep non-promoted pages to add them back to the free list.
      DCHECK_EQ(NEW_SPACE, owner_identity);
      DCHECK_EQ(0, p->live_bytes());
      DCHECK(p->SweepingDone());
      PagedNewSpace* space = heap_->paged_new_space();
      if (space->ShouldReleaseEmptyPage()) {
        ReleasePage(space->paged_space(), p);
      } else {
        sweeper_->SweepEmptyNewSpacePage(p);
      }
    }
  }
  new_space_evacuation_pages_.clear();
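  // Reset per-page state on large pages that were promoted to the old
  // generation: clear the promotion flag, drop mark bits and live byte counts
  // (unless sticky mark bits are used), and reset the marking progress
  // tracker.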
  for (LargePageMetadata* p : promoted_large_pages_) {
    MemoryChunk* chunk = p->Chunk();
    DCHECK(chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION));
    chunk->ClearFlagNonExecutable(MemoryChunk::PAGE_NEW_OLD_PROMOTION);
    Tagged<HeapObject> object = p->GetObject();
    if (!v8_flags.sticky_mark_bits) {
      MarkBit::From(object).Clear();
      p->SetLiveBytes(0);
    }
    p->marking_progress_tracker().ResetIfEnabled();
  }
  promoted_large_pages_.clear();
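  // Return evacuation candidates whose compaction was aborted to the sweeper
  // and clear the abort flag; their objects stay in place and are swept
  // instead of being relocated.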
  for (PageMetadata* p : old_space_evacuation_pages_) {
    MemoryChunk* chunk = p->Chunk();
    if (chunk->IsFlagSet(MemoryChunk::COMPACTION_WAS_ABORTED)) {
      sweeper_->AddPage(p->owner_identity(), p);
      chunk->ClearFlagSlow(MemoryChunk::COMPACTION_WAS_ABORTED);
    }
  }
}
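  // New space pages are either promoted in place to the old generation or
  // evacuated; in both cases they are added to the parallel evacuation work
  // items.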
  // Evacuation of new space pages cannot be aborted, so it needs to run
  // before old space evacuation.
  bool force_page_promotion =
      heap_->IsGCWithStack() && !v8_flags.compact_with_stack;
  for (PageMetadata* page : new_space_evacuation_pages_) {
    intptr_t live_bytes_on_page = page->live_bytes();
    DCHECK_LT(0, live_bytes_on_page);
    live_bytes += live_bytes_on_page;
    MemoryReductionMode memory_reduction_mode =
        heap_->ShouldReduceMemory() ? MemoryReductionMode::kShouldReduceMemory
                                    : MemoryReductionMode::kNone;
    if (ShouldMovePage(page, live_bytes_on_page, memory_reduction_mode) ||
        force_page_promotion || page->Chunk()->IsQuarantined()) {
      EvacuateNewToOldSpacePageVisitor::Move(page);
      page->Chunk()->SetFlagNonExecutable(MemoryChunk::PAGE_NEW_OLD_PROMOTION);
      DCHECK_EQ(heap_->old_space(), page->owner());
      // The move added page->allocated_bytes to the old space, but we are
      // going to sweep the page and add page->live_byte_count.
      heap_->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(), page);
    }
    evacuation_items.emplace_back(ParallelWorkItem{}, page);
  }
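  // When this GC runs with a stack on top, some or all evacuation candidates
  // must not be compacted; report them as aborted depending on the flags
  // checked below.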
  if (heap_->IsGCWithStack()) {
    if (!v8_flags.compact_with_stack) {
      for (PageMetadata* page : old_space_evacuation_pages_) {
        ReportAbortedEvacuationCandidateDueToFlags(page, page->Chunk());
      }
    } else if (!v8_flags.compact_code_space_with_stack ||
               heap_->isolate()->InFastCCall()) {
      // For fast C calls we cannot patch the return address in the native
      // stack frame if we would relocate InstructionStream objects.
      for (PageMetadata* page : old_space_evacuation_pages_) {
        if (page->owner_identity() != CODE_SPACE) continue;
        ReportAbortedEvacuationCandidateDueToFlags(page, page->Chunk());
      }
    }
  } else {
    // There should always be a stack when we are in a fast C call.
    DCHECK(!heap_->isolate()->InFastCCall());
  }
  if (v8_flags.stress_compaction || v8_flags.stress_compaction_random) {
    // Stress aborting of evacuation by aborting ~5% of evacuation candidates
    // when stress testing.
    const double kFraction = 0.05;

    for (PageMetadata* page : old_space_evacuation_pages_) {
      if (heap_->isolate()->fuzzer_rng()->NextDouble() < kFraction) {
        ReportAbortedEvacuationCandidateDueToFlags(page, page->Chunk());
      }
    }
  }
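  // Candidates whose compaction was aborted above are skipped below.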
  for (PageMetadata* page : old_space_evacuation_pages_) {
    MemoryChunk* chunk = page->Chunk();
    if (chunk->IsFlagSet(MemoryChunk::COMPACTION_WAS_ABORTED)) continue;
  // Promote young generation large objects.
  if (auto* new_lo_space = heap_->new_lo_space()) {
    for (auto it = new_lo_space->begin(); it != new_lo_space->end();) {
      LargePageMetadata* current = *(it++);
      Tagged<HeapObject> object = current->GetObject();
      // The black-allocated flag was already cleared in SweepLargeSpace().
      DCHECK_IMPLIES(v8_flags.black_allocated_pages,
                     !HeapLayout::InBlackAllocatedPage(object));
      if (marking_state_->IsMarked(object)) {
        heap_->lo_space()->PromoteNewLargeObject(current);
        current->Chunk()->SetFlagNonExecutable(
            MemoryChunk::PAGE_NEW_OLD_PROMOTION);
        promoted_large_pages_.push_back(current);
        evacuation_items.emplace_back(ParallelWorkItem{}, current);
      }
    }
    new_lo_space->set_objects_size(0);
  }