github.com/nodejs/node.git
Diffstat (limited to 'deps/v8/src/heap/heap.cc')
-rw-r--r--  deps/v8/src/heap/heap.cc  896
1 file changed, 517 insertions, 379 deletions
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 518bbcf1625..606ba0fe65f 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -13,6 +13,7 @@
#include "src/base/bits.h"
#include "src/base/flags.h"
#include "src/base/once.h"
+#include "src/base/platform/mutex.h"
#include "src/base/utils/random-number-generator.h"
#include "src/builtins/accessors.h"
#include "src/codegen/assembler-inl.h"
@@ -40,16 +41,18 @@
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/incremental-marking.h"
+#include "src/heap/large-spaces.h"
#include "src/heap/local-heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/memory-chunk-inl.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/read-only-heap.h"
-#include "src/heap/remembered-set.h"
+#include "src/heap/remembered-set-inl.h"
#include "src/heap/safepoint.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
@@ -71,7 +74,7 @@
#include "src/objects/slots-inl.h"
#include "src/regexp/regexp.h"
#include "src/snapshot/embedded/embedded-data.h"
-#include "src/snapshot/serializer-common.h"
+#include "src/snapshot/serializer-deserializer.h"
#include "src/snapshot/snapshot.h"
#include "src/strings/string-stream.h"
#include "src/strings/unicode-decoder.h"
@@ -202,8 +205,9 @@ Heap::Heap()
: isolate_(isolate()),
memory_pressure_level_(MemoryPressureLevel::kNone),
global_pretenuring_feedback_(kInitialFeedbackCapacity),
- safepoint_(new Safepoint(this)),
- external_string_table_(this) {
+ safepoint_(new GlobalSafepoint(this)),
+ external_string_table_(this),
+ collection_barrier_(this) {
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
@@ -1108,14 +1112,32 @@ void Heap::DeoptMarkedAllocationSites() {
Deoptimizer::DeoptimizeMarkedCode(isolate_);
}
-
-void Heap::GarbageCollectionEpilogue() {
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
- if (Heap::ShouldZapGarbage() || FLAG_clear_free_memory) {
- ZapFromSpace();
+void Heap::GarbageCollectionEpilogueInSafepoint() {
+#define UPDATE_COUNTERS_FOR_SPACE(space) \
+ isolate_->counters()->space##_bytes_available()->Set( \
+ static_cast<int>(space()->Available())); \
+ isolate_->counters()->space##_bytes_committed()->Set( \
+ static_cast<int>(space()->CommittedMemory())); \
+ isolate_->counters()->space##_bytes_used()->Set( \
+ static_cast<int>(space()->SizeOfObjects()));
+#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
+ if (space()->CommittedMemory() > 0) { \
+ isolate_->counters()->external_fragmentation_##space()->AddSample( \
+ static_cast<int>(100 - (space()->SizeOfObjects() * 100.0) / \
+ space()->CommittedMemory())); \
}
+#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
+ UPDATE_COUNTERS_FOR_SPACE(space) \
+ UPDATE_FRAGMENTATION_FOR_SPACE(space)
- AllowHeapAllocation for_the_rest_of_the_epilogue;
+ UPDATE_COUNTERS_FOR_SPACE(new_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
+ UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
+#undef UPDATE_COUNTERS_FOR_SPACE
+#undef UPDATE_FRAGMENTATION_FOR_SPACE
+#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
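// Editorial note: for readability, UPDATE_COUNTERS_FOR_SPACE(new_space) above
// expands roughly to the following (the counter names come from token pasting,
// the accessors from Heap):
//
//   isolate_->counters()->new_space_bytes_available()->Set(
//       static_cast<int>(new_space()->Available()));
//   isolate_->counters()->new_space_bytes_committed()->Set(
//       static_cast<int>(new_space()->CommittedMemory()));
//   isolate_->counters()->new_space_bytes_used()->Set(
//       static_cast<int>(new_space()->SizeOfObjects()));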
#ifdef DEBUG
// Old-to-new slot sets must be empty after each collection.
@@ -1133,6 +1155,15 @@ void Heap::GarbageCollectionEpilogue() {
if (FLAG_code_stats) ReportCodeStatistics("After GC");
if (FLAG_check_handle_count) CheckHandleCount();
#endif
+}
+
+void Heap::GarbageCollectionEpilogue() {
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
+ if (Heap::ShouldZapGarbage() || FLAG_clear_free_memory) {
+ ZapFromSpace();
+ }
+
+ AllowHeapAllocation for_the_rest_of_the_epilogue;
UpdateMaximumCommitted();
@@ -1160,33 +1191,6 @@ void Heap::GarbageCollectionEpilogue() {
static_cast<int>(MaximumCommittedMemory() / KB));
}
-#define UPDATE_COUNTERS_FOR_SPACE(space) \
- isolate_->counters()->space##_bytes_available()->Set( \
- static_cast<int>(space()->Available())); \
- isolate_->counters()->space##_bytes_committed()->Set( \
- static_cast<int>(space()->CommittedMemory())); \
- isolate_->counters()->space##_bytes_used()->Set( \
- static_cast<int>(space()->SizeOfObjects()));
-#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
- if (space()->CommittedMemory() > 0) { \
- isolate_->counters()->external_fragmentation_##space()->AddSample( \
- static_cast<int>(100 - \
- (space()->SizeOfObjects() * 100.0) / \
- space()->CommittedMemory())); \
- }
-#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
- UPDATE_COUNTERS_FOR_SPACE(space) \
- UPDATE_FRAGMENTATION_FOR_SPACE(space)
-
- UPDATE_COUNTERS_FOR_SPACE(new_space)
- UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
- UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
- UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
- UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
-#undef UPDATE_COUNTERS_FOR_SPACE
-#undef UPDATE_FRAGMENTATION_FOR_SPACE
-#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
-
#ifdef DEBUG
ReportStatisticsAfterGC();
#endif // DEBUG
@@ -1197,16 +1201,6 @@ void Heap::GarbageCollectionEpilogue() {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
ReduceNewSpaceSize();
}
-
- if (FLAG_harmony_weak_refs &&
- isolate()->host_cleanup_finalization_group_callback()) {
- HandleScope handle_scope(isolate());
- Handle<JSFinalizationRegistry> finalization_registry;
- while (
- DequeueDirtyJSFinalizationRegistry().ToHandle(&finalization_registry)) {
- isolate()->RunHostCleanupFinalizationGroupCallback(finalization_registry);
- }
- }
}
class GCCallbacksScope {
@@ -1387,16 +1381,15 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
// The optimizing compiler may be unnecessarily holding on to memory.
isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
isolate()->ClearSerializerData();
- set_current_gc_flags(kReduceMemoryFootprintMask);
+ set_current_gc_flags(
+ kReduceMemoryFootprintMask |
+ (gc_reason == GarbageCollectionReason::kLowMemoryNotification ? kForcedGC
+ : 0));
isolate_->compilation_cache()->Clear();
const int kMaxNumberOfAttempts = 7;
const int kMinNumberOfAttempts = 2;
- const v8::GCCallbackFlags callback_flags =
- gc_reason == GarbageCollectionReason::kLowMemoryNotification
- ? v8::kGCCallbackFlagForced
- : v8::kGCCallbackFlagCollectAllAvailableGarbage;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
- if (!CollectGarbage(OLD_SPACE, gc_reason, callback_flags) &&
+ if (!CollectGarbage(OLD_SPACE, gc_reason, kNoGCCallbackFlags) &&
attempt + 1 >= kMinNumberOfAttempts) {
break;
}
@@ -1446,9 +1439,15 @@ void Heap::ReportExternalMemoryPressure() {
static_cast<GCCallbackFlags>(
kGCCallbackFlagSynchronousPhantomCallbackProcessing |
kGCCallbackFlagCollectAllExternalMemory);
- if (isolate()->isolate_data()->external_memory_ >
- (isolate()->isolate_data()->external_memory_low_since_mark_compact_ +
- external_memory_hard_limit())) {
+ int64_t current = isolate()->isolate_data()->external_memory_;
+ int64_t baseline =
+ isolate()->isolate_data()->external_memory_low_since_mark_compact_;
+ int64_t limit = isolate()->isolate_data()->external_memory_limit_;
+ TRACE_EVENT2(
+ "devtools.timeline,v8", "V8.ExternalMemoryPressure", "external_memory_mb",
+ static_cast<int>((current - baseline) / MB), "external_memory_limit_mb",
+ static_cast<int>((limit - baseline) / MB));
+ if (current > baseline + external_memory_hard_limit()) {
CollectAllGarbage(
kReduceMemoryFootprintMask,
GarbageCollectionReason::kExternalMemoryPressure,
@@ -1472,10 +1471,7 @@ void Heap::ReportExternalMemoryPressure() {
const double kMaxStepSize = 10;
const double ms_step = Min(
kMaxStepSize,
- Max(kMinStepSize,
- static_cast<double>(isolate()->isolate_data()->external_memory_) /
- isolate()->isolate_data()->external_memory_limit_ *
- kMinStepSize));
+ Max(kMinStepSize, static_cast<double>(current) / limit * kMinStepSize));
const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
// Extend the gc callback flags with external memory flags.
current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
@@ -1516,7 +1512,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
const v8::GCCallbackFlags gc_callback_flags) {
const char* collector_reason = nullptr;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
- is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced;
+ is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced ||
+ current_gc_flags_ & kForcedGC;
DevToolsTraceEventScope devtools_trace_event_scope(
this, IsYoungGenerationCollector(collector) ? "MinorGC" : "MajorGC",
@@ -1558,7 +1555,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
}
- bool next_gc_likely_to_collect_more = false;
+ size_t freed_global_handles = 0;
+
size_t committed_memory_before = 0;
if (collector == MARK_COMPACTOR) {
@@ -1584,18 +1582,69 @@ bool Heap::CollectGarbage(AllocationSpace space,
OptionalTimedHistogramScope histogram_timer_priority_scope(
gc_type_priority_timer, isolate_, mode);
- next_gc_likely_to_collect_more =
- PerformGarbageCollection(collector, gc_callback_flags);
+ if (!IsYoungGenerationCollector(collector)) {
+ PROFILE(isolate_, CodeMovingGCEvent());
+ }
+
+ GCType gc_type = collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact
+ : kGCTypeScavenge;
+ {
+ GCCallbacksScope scope(this);
+ // Temporarily override any embedder stack state as callbacks may create
+ // their own state on the stack and recursively trigger GC.
+ EmbedderStackStateScope embedder_scope(
+ local_embedder_heap_tracer(),
+ EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers);
+ if (scope.CheckReenter()) {
+ AllowHeapAllocation allow_allocation;
+ AllowJavascriptExecution allow_js(isolate());
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
+ }
+ }
+
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+ tp_heap_->CollectGarbage();
+ } else {
+ freed_global_handles +=
+ PerformGarbageCollection(collector, gc_callback_flags);
+ }
+ // Clear is_current_gc_forced now that the current GC is complete. Do this
+ // before GarbageCollectionEpilogue() since that could trigger another
+ // unforced GC.
+ is_current_gc_forced_ = false;
+
+ {
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
+ gc_post_processing_depth_++;
+ {
+ AllowHeapAllocation allow_allocation;
+ AllowJavascriptExecution allow_js(isolate());
+ freed_global_handles +=
+ isolate_->global_handles()->PostGarbageCollectionProcessing(
+ collector, gc_callback_flags);
+ }
+ gc_post_processing_depth_--;
+ }
+
+ {
+ GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ AllowHeapAllocation allow_allocation;
+ AllowJavascriptExecution allow_js(isolate());
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
+ }
+ }
if (collector == MARK_COMPACTOR || collector == SCAVENGER) {
tracer()->RecordGCPhasesHistograms(gc_type_timer);
}
}
- // Clear is_current_gc_forced now that the current GC is complete. Do this
- // before GarbageCollectionEpilogue() since that could trigger another
- // unforced GC.
- is_current_gc_forced_ = false;
-
GarbageCollectionEpilogue();
if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
isolate()->CheckDetachedContextsAfterGC();
@@ -1610,11 +1659,9 @@ bool Heap::CollectGarbage(AllocationSpace space,
// Trigger one more GC if
// - this GC decreased committed memory,
// - there is high fragmentation,
- // - there are live detached contexts.
event.next_gc_likely_to_collect_more =
(committed_memory_before > committed_memory_after + MB) ||
- HasHighFragmentation(used_memory_after, committed_memory_after) ||
- (detached_contexts().length() > 0);
+ HasHighFragmentation(used_memory_after, committed_memory_after);
event.committed_memory = committed_memory_after;
if (deserialization_complete_) {
memory_reducer_->NotifyMarkCompact(event);
@@ -1634,6 +1681,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
isolate()->CountUsage(v8::Isolate::kForcedGC);
}
+ collection_barrier_.Increment();
+
// Start incremental marking for the next cycle. We do this only for scavenger
// to avoid a loop where mark-compact causes another mark-compact.
if (IsYoungGenerationCollector(collector)) {
@@ -1642,7 +1691,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
kGCCallbackScheduleIdleGarbageCollection);
}
- return next_gc_likely_to_collect_more;
+ return freed_global_handles > 0;
}
@@ -1659,9 +1708,10 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
if (!isolate()->context().is_null()) {
RemoveDirtyFinalizationRegistriesOnContext(isolate()->raw_native_context());
+ isolate()->raw_native_context().set_retained_maps(
+ ReadOnlyRoots(this).empty_weak_array_list());
}
- number_of_disposed_maps_ = retained_maps().length();
tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
return ++contexts_disposed_;
}
@@ -1670,6 +1720,7 @@ void Heap::StartIncrementalMarking(int gc_flags,
GarbageCollectionReason gc_reason,
GCCallbackFlags gc_callback_flags) {
DCHECK(incremental_marking()->IsStopped());
+ SafepointScope safepoint(this);
set_current_gc_flags(gc_flags);
current_gc_callback_flags_ = gc_callback_flags;
incremental_marking()->Start(gc_reason);
@@ -1692,6 +1743,21 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
}
}
+void Heap::StartIncrementalMarkingIfAllocationLimitIsReachedBackground() {
+ if (!incremental_marking()->IsStopped() ||
+ !incremental_marking()->CanBeActivated()) {
+ return;
+ }
+
+ const size_t old_generation_space_available = OldGenerationSpaceAvailable();
+ const size_t global_memory_available = GlobalMemoryAvailable();
+
+ if (old_generation_space_available < new_space_->Capacity() ||
+ global_memory_available < new_space_->Capacity()) {
+ incremental_marking()->incremental_marking_job()->ScheduleTask(this);
+ }
+}
+
void Heap::StartIdleIncrementalMarking(
GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags) {
@@ -1947,6 +2013,26 @@ void Heap::EnsureFromSpaceIsCommitted() {
FatalProcessOutOfMemory("Committing semi space failed.");
}
+void Heap::CollectionBarrier::Increment() {
+ base::MutexGuard guard(&mutex_);
+ requested_ = false;
+ cond_.NotifyAll();
+}
+
+void Heap::CollectionBarrier::Wait() {
+ base::MutexGuard guard(&mutex_);
+
+ if (!requested_) {
+ heap_->MemoryPressureNotification(MemoryPressureLevel::kCritical, false);
+ requested_ = true;
+ }
+
+ while (requested_) {
+ cond_.Wait(&mutex_);
+ }
+}
+
+void Heap::RequestAndWaitForCollection() { collection_barrier_.Wait(); }
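// Editorial sketch (not part of this change): how the CollectionBarrier above
// is intended to be used. A thread that needs a GC calls
// Heap::RequestAndWaitForCollection(); Wait() raises a critical memory
// pressure notification once and then blocks on the condition variable until
// Heap::CollectGarbage() calls collection_barrier_.Increment(), which clears
// requested_ and wakes all waiters. The helper name below is hypothetical.
void RetryAllocationAfterRequestedGC(Heap* heap) {
  // Blocks the calling thread until the main thread has completed a GC.
  heap->RequestAndWaitForCollection();
  // ...retry the failed allocation here...
}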
void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
if (start_new_space_size == 0) return;
@@ -1970,52 +2056,22 @@ void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
tracer()->AddSurvivalRatio(survival_rate);
}
-bool Heap::PerformGarbageCollection(
+size_t Heap::PerformGarbageCollection(
GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
DisallowJavascriptExecution no_js(isolate());
-
- if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- return tp_heap_->CollectGarbage();
- }
-
- size_t freed_global_handles = 0;
-
- if (!IsYoungGenerationCollector(collector)) {
- PROFILE(isolate_, CodeMovingGCEvent());
- }
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- VerifyStringTable(this->isolate());
+ base::Optional<SafepointScope> optional_safepoint_scope;
+ if (FLAG_local_heaps) {
+ optional_safepoint_scope.emplace(this);
+ // Fill and reset all LABs
+ safepoint()->IterateLocalHeaps(
+ [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
}
-#endif
-
- GCType gc_type =
- collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
-
- {
- GCCallbacksScope scope(this);
- // Temporary override any embedder stack state as callbacks may create their
- // own state on the stack and recursively trigger GC.
- EmbedderStackStateScope embedder_scope(
- local_embedder_heap_tracer(),
- EmbedderHeapTracer::EmbedderStackState::kUnknown);
- if (scope.CheckReenter()) {
- AllowHeapAllocation allow_allocation;
- AllowJavascriptExecution allow_js(isolate());
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
- }
- }
-
- if (FLAG_local_heaps) safepoint()->Start();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
}
#endif
+ tracer()->StartInSafepoint();
EnsureFromSpaceIsCommitted();
@@ -2024,33 +2080,13 @@ bool Heap::PerformGarbageCollection(
switch (collector) {
case MARK_COMPACTOR:
- UpdateOldGenerationAllocationCounter();
- // Perform mark-sweep with optional compaction.
MarkCompact();
- old_generation_size_configured_ = true;
- // This should be updated before PostGarbageCollectionProcessing, which
- // can cause another GC. Take into account the objects promoted during
- // GC.
- old_generation_allocation_counter_at_last_gc_ +=
- static_cast<size_t>(promoted_objects_size_);
- old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
break;
case MINOR_MARK_COMPACTOR:
MinorMarkCompact();
break;
case SCAVENGER:
- if ((fast_promotion_mode_ &&
- CanExpandOldGeneration(new_space()->Size() +
- new_lo_space()->Size()))) {
- tracer()->NotifyYoungGenerationHandling(
- YoungGenerationHandling::kFastPromotionDuringScavenge);
- EvacuateYoungGeneration();
- } else {
- tracer()->NotifyYoungGenerationHandling(
- YoungGenerationHandling::kRegularScavenge);
-
- Scavenge();
- }
+ Scavenge();
break;
}
@@ -2072,6 +2108,13 @@ bool Heap::PerformGarbageCollection(
isolate_->counters()->objs_since_last_young()->Set(0);
+ isolate_->eternal_handles()->PostGarbageCollectionProcessing();
+
+ // Update relocatables.
+ Relocatable::PostGarbageCollectionProcessing(isolate_);
+
+ size_t freed_global_handles;
+
{
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
// First round weak callbacks are not supposed to allocate and trigger
@@ -2095,47 +2138,14 @@ bool Heap::PerformGarbageCollection(
Verify();
}
#endif
- if (FLAG_local_heaps) safepoint()->End();
-
- {
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
- gc_post_processing_depth_++;
- {
- AllowHeapAllocation allow_allocation;
- AllowJavascriptExecution allow_js(isolate());
- freed_global_handles +=
- isolate_->global_handles()->PostGarbageCollectionProcessing(
- collector, gc_callback_flags);
- }
- gc_post_processing_depth_--;
- }
-
- isolate_->eternal_handles()->PostGarbageCollectionProcessing();
-
- // Update relocatables.
- Relocatable::PostGarbageCollectionProcessing(isolate_);
RecomputeLimits(collector);
- {
- GCCallbacksScope scope(this);
- if (scope.CheckReenter()) {
- AllowHeapAllocation allow_allocation;
- AllowJavascriptExecution allow_js(isolate());
- TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
- }
- }
+ GarbageCollectionEpilogueInSafepoint();
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- VerifyStringTable(this->isolate());
- }
-#endif
+ tracer()->StopInSafepoint();
- return freed_global_handles > 0;
+ return freed_global_handles;
}
void Heap::RecomputeLimits(GarbageCollector collector) {
@@ -2247,10 +2257,11 @@ void Heap::MarkCompact() {
LOG(isolate_, ResourceEvent("markcompact", "begin"));
- uint64_t size_of_objects_before_gc = SizeOfObjects();
-
CodeSpaceMemoryModificationScope code_modifcation(this);
+ UpdateOldGenerationAllocationCounter();
+ uint64_t size_of_objects_before_gc = SizeOfObjects();
+
mark_compact_collector()->Prepare();
ms_count_++;
@@ -2266,6 +2277,14 @@ void Heap::MarkCompact() {
if (FLAG_allocation_site_pretenuring) {
EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
}
+ old_generation_size_configured_ = true;
+ // This should be updated before PostGarbageCollectionProcessing, which
+ // can cause another GC. Take into account the objects promoted during
+ // GC.
+ old_generation_allocation_counter_at_last_gc_ +=
+ static_cast<size_t>(promoted_objects_size_);
+ old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
+ global_memory_at_last_gc_ = GlobalSizeOfObjects();
}
void Heap::MinorMarkCompact() {
@@ -2380,6 +2399,16 @@ void Heap::EvacuateYoungGeneration() {
}
void Heap::Scavenge() {
+ if ((fast_promotion_mode_ &&
+ CanExpandOldGeneration(new_space()->Size() + new_lo_space()->Size()))) {
+ tracer()->NotifyYoungGenerationHandling(
+ YoungGenerationHandling::kFastPromotionDuringScavenge);
+ EvacuateYoungGeneration();
+ return;
+ }
+ tracer()->NotifyYoungGenerationHandling(
+ YoungGenerationHandling::kRegularScavenge);
+
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
base::MutexGuard guard(relocation_mutex());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
@@ -2813,7 +2842,7 @@ int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
return 0;
}
-
+// static
int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
return kTaggedSize;
@@ -2826,24 +2855,28 @@ size_t Heap::GetCodeRangeReservedAreaSize() {
return kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
}
-HeapObject Heap::PrecedeWithFiller(HeapObject object, int filler_size) {
- CreateFillerObjectAt(object.address(), filler_size, ClearRecordedSlots::kNo);
+// static
+HeapObject Heap::PrecedeWithFiller(ReadOnlyRoots roots, HeapObject object,
+ int filler_size) {
+ CreateFillerObjectAt(roots, object.address(), filler_size,
+ ClearFreedMemoryMode::kDontClearFreedMemory);
return HeapObject::FromAddress(object.address() + filler_size);
}
-HeapObject Heap::AlignWithFiller(HeapObject object, int object_size,
- int allocation_size,
+// static
+HeapObject Heap::AlignWithFiller(ReadOnlyRoots roots, HeapObject object,
+ int object_size, int allocation_size,
AllocationAlignment alignment) {
int filler_size = allocation_size - object_size;
DCHECK_LT(0, filler_size);
int pre_filler = GetFillToAlign(object.address(), alignment);
if (pre_filler) {
- object = PrecedeWithFiller(object, pre_filler);
+ object = PrecedeWithFiller(roots, object, pre_filler);
filler_size -= pre_filler;
}
if (filler_size) {
- CreateFillerObjectAt(object.address() + object_size, filler_size,
- ClearRecordedSlots::kNo);
+ CreateFillerObjectAt(roots, object.address() + object_size, filler_size,
+ ClearFreedMemoryMode::kDontClearFreedMemory);
}
return object;
}
@@ -2929,47 +2962,83 @@ void Heap::FlushNumberStringCache() {
}
}
-HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
- ClearRecordedSlots clear_slots_mode,
- ClearFreedMemoryMode clear_memory_mode) {
+namespace {
+
+HeapObject CreateFillerObjectAtImpl(ReadOnlyRoots roots, Address addr, int size,
+ ClearFreedMemoryMode clear_memory_mode) {
if (size == 0) return HeapObject();
HeapObject filler = HeapObject::FromAddress(addr);
- bool clear_memory =
- (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory ||
- clear_slots_mode == ClearRecordedSlots::kYes);
if (size == kTaggedSize) {
- filler.set_map_after_allocation(
- Map::unchecked_cast(isolate()->root(RootIndex::kOnePointerFillerMap)),
- SKIP_WRITE_BARRIER);
+ filler.set_map_after_allocation(roots.unchecked_one_pointer_filler_map(),
+ SKIP_WRITE_BARRIER);
} else if (size == 2 * kTaggedSize) {
- filler.set_map_after_allocation(
- Map::unchecked_cast(isolate()->root(RootIndex::kTwoPointerFillerMap)),
- SKIP_WRITE_BARRIER);
- if (clear_memory) {
+ filler.set_map_after_allocation(roots.unchecked_two_pointer_filler_map(),
+ SKIP_WRITE_BARRIER);
+ if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
AtomicSlot slot(ObjectSlot(addr) + 1);
*slot = static_cast<Tagged_t>(kClearedFreeMemoryValue);
}
} else {
DCHECK_GT(size, 2 * kTaggedSize);
- filler.set_map_after_allocation(
- Map::unchecked_cast(isolate()->root(RootIndex::kFreeSpaceMap)),
- SKIP_WRITE_BARRIER);
+ filler.set_map_after_allocation(roots.unchecked_free_space_map(),
+ SKIP_WRITE_BARRIER);
FreeSpace::cast(filler).relaxed_write_size(size);
- if (clear_memory) {
+ if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue),
(size / kTaggedSize) - 2);
}
}
- if (clear_slots_mode == ClearRecordedSlots::kYes &&
- !V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
- ClearRecordedSlotRange(addr, addr + size);
- }
// At this point, we may be deserializing the heap from a snapshot, and
// none of the maps have been created yet and are nullptr.
DCHECK((filler.map_slot().contains_value(kNullAddress) &&
- !deserialization_complete_) ||
+ !Heap::FromWritableHeapObject(filler)->deserialization_complete()) ||
filler.map().IsMap());
+
+ return filler;
+}
+
+#ifdef DEBUG
+void VerifyNoNeedToClearSlots(Address start, Address end) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(start);
+ // TODO(ulan): Support verification of large pages.
+ if (chunk->InYoungGeneration() || chunk->IsLargePage()) return;
+ Space* space = chunk->owner();
+ if (static_cast<PagedSpace*>(space)->is_off_thread_space()) return;
+ space->heap()->VerifySlotRangeHasNoRecordedSlots(start, end);
+}
+#else
+void VerifyNoNeedToClearSlots(Address start, Address end) {}
+#endif // DEBUG
+
+} // namespace
+
+// static
+HeapObject Heap::CreateFillerObjectAt(ReadOnlyRoots roots, Address addr,
+ int size,
+ ClearFreedMemoryMode clear_memory_mode) {
+ // TODO(leszeks): Verify that no slots need to be recorded.
+ HeapObject filler =
+ CreateFillerObjectAtImpl(roots, addr, size, clear_memory_mode);
+ VerifyNoNeedToClearSlots(addr, addr + size);
+ return filler;
+}
+
+HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
+ ClearRecordedSlots clear_slots_mode) {
+ if (size == 0) return HeapObject();
+ HeapObject filler = CreateFillerObjectAtImpl(
+ ReadOnlyRoots(this), addr, size,
+ clear_slots_mode == ClearRecordedSlots::kYes
+ ? ClearFreedMemoryMode::kClearFreedMemory
+ : ClearFreedMemoryMode::kDontClearFreedMemory);
+ if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
+ if (clear_slots_mode == ClearRecordedSlots::kYes) {
+ ClearRecordedSlotRange(addr, addr + size);
+ } else {
+ VerifyNoNeedToClearSlots(addr, addr + size);
+ }
+ }
return filler;
}
@@ -3158,7 +3227,7 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
// to the original FixedArray (which is now the filler object).
LeftTrimmerVerifierRootVisitor root_visitor(object);
ReadOnlyRoots(this).Iterate(&root_visitor);
- IterateRoots(&root_visitor, VISIT_ALL);
+ IterateRoots(&root_visitor, {});
}
#endif // ENABLE_SLOW_DCHECKS
@@ -3440,6 +3509,28 @@ void Heap::FinalizeIncrementalMarkingAtomically(
CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
}
+void Heap::InvokeIncrementalMarkingPrologueCallbacks() {
+ GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ AllowHeapAllocation allow_allocation;
+ TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
+ }
+}
+
+void Heap::InvokeIncrementalMarkingEpilogueCallbacks() {
+ GCCallbacksScope scope(this);
+ if (scope.CheckReenter()) {
+ AllowHeapAllocation allow_allocation;
+ TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
+ VMState<EXTERNAL> state(isolate_);
+ HandleScope handle_scope(isolate_);
+ CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
+ }
+}
+
void Heap::FinalizeIncrementalMarkingIncrementally(
GarbageCollectionReason gc_reason) {
if (FLAG_trace_incremental_marking) {
@@ -3456,27 +3547,10 @@ void Heap::FinalizeIncrementalMarkingIncrementally(
TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
- {
- GCCallbacksScope scope(this);
- if (scope.CheckReenter()) {
- AllowHeapAllocation allow_allocation;
- TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
- }
- }
+ SafepointScope safepoint(this);
+ InvokeIncrementalMarkingPrologueCallbacks();
incremental_marking()->FinalizeIncrementally();
- {
- GCCallbacksScope scope(this);
- if (scope.CheckReenter()) {
- AllowHeapAllocation allow_allocation;
- TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
- VMState<EXTERNAL> state(isolate_);
- HandleScope handle_scope(isolate_);
- CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
- }
- }
+ InvokeIncrementalMarkingEpilogueCallbacks();
}
void Heap::RegisterDeserializedObjectsForBlackAllocation(
@@ -3758,19 +3832,15 @@ void Heap::CheckMemoryPressure() {
// the finalizers.
memory_pressure_level_ = MemoryPressureLevel::kNone;
if (memory_pressure_level == MemoryPressureLevel::kCritical) {
+ TRACE_EVENT0("devtools.timeline,v8", "V8.CheckMemoryPressure");
CollectGarbageOnMemoryPressure();
} else if (memory_pressure_level == MemoryPressureLevel::kModerate) {
if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
+ TRACE_EVENT0("devtools.timeline,v8", "V8.CheckMemoryPressure");
StartIncrementalMarking(kReduceMemoryFootprintMask,
GarbageCollectionReason::kMemoryPressure);
}
}
- if (memory_reducer_) {
- MemoryReducer::Event event;
- event.type = MemoryReducer::kPossibleGarbage;
- event.time_ms = MonotonicallyIncreasingTimeInMs();
- memory_reducer_->NotifyPossibleGarbage(event);
- }
}
void Heap::CollectGarbageOnMemoryPressure() {
@@ -3811,6 +3881,8 @@ void Heap::CollectGarbageOnMemoryPressure() {
void Heap::MemoryPressureNotification(MemoryPressureLevel level,
bool is_isolate_locked) {
+ TRACE_EVENT1("devtools.timeline,v8", "V8.MemoryPressureNotification", "level",
+ static_cast<int>(level));
MemoryPressureLevel previous = memory_pressure_level_;
memory_pressure_level_ = level;
if ((previous != MemoryPressureLevel::kCritical &&
@@ -3830,12 +3902,17 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
}
void Heap::EagerlyFreeExternalMemory() {
- for (Page* page : *old_space()) {
- if (!page->SweepingDone()) {
- base::MutexGuard guard(page->mutex());
+ if (FLAG_array_buffer_extension) {
+ array_buffer_sweeper()->EnsureFinished();
+ } else {
+ CHECK(!FLAG_local_heaps);
+ for (Page* page : *old_space()) {
if (!page->SweepingDone()) {
- ArrayBufferTracker::FreeDead(
- page, mark_compact_collector()->non_atomic_marking_state());
+ base::MutexGuard guard(page->mutex());
+ if (!page->SweepingDone()) {
+ ArrayBufferTracker::FreeDead(
+ page, mark_compact_collector()->non_atomic_marking_state());
+ }
}
}
}
@@ -4121,7 +4198,7 @@ void Heap::Verify() {
array_buffer_sweeper()->EnsureFinished();
VerifyPointersVisitor visitor(this);
- IterateRoots(&visitor, VISIT_ONLY_STRONG);
+ IterateRoots(&visitor, {});
if (!isolate()->context().is_null() &&
!isolate()->normalized_map_cache()->IsUndefined(isolate())) {
@@ -4143,6 +4220,7 @@ void Heap::Verify() {
lo_space_->Verify(isolate());
code_lo_space_->Verify(isolate());
new_lo_space_->Verify(isolate());
+ VerifyStringTable(isolate());
}
void Heap::VerifyReadOnlyHeap() {
@@ -4191,10 +4269,14 @@ class SlotVerifyingVisitor : public ObjectVisitor {
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
Object target = rinfo->target_object();
if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
- CHECK(InTypedSet(FULL_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
- InTypedSet(COMPRESSED_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
- (rinfo->IsInConstantPool() &&
- InTypedSet(OBJECT_SLOT, rinfo->constant_pool_entry_address())));
+ CHECK(
+ InTypedSet(FULL_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
+ InTypedSet(COMPRESSED_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
+ (rinfo->IsInConstantPool() &&
+ InTypedSet(COMPRESSED_OBJECT_SLOT,
+ rinfo->constant_pool_entry_address())) ||
+ (rinfo->IsInConstantPool() &&
+ InTypedSet(FULL_OBJECT_SLOT, rinfo->constant_pool_entry_address())));
}
}
@@ -4308,6 +4390,13 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
#ifdef DEBUG
void Heap::VerifyCountersAfterSweeping() {
+ if (FLAG_local_heaps) {
+ // Ensure heap is iterable
+ safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+ local_heap->MakeLinearAllocationAreaIterable();
+ });
+ }
+
PagedSpaceIterator spaces(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
@@ -4361,20 +4450,13 @@ void Heap::set_builtin(int index, Code builtin) {
isolate()->builtins_table()[index] = builtin.ptr();
}
-void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
- IterateStrongRoots(v, mode);
- IterateWeakRoots(v, mode);
-}
-
-void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
- const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
- mode == VISIT_ALL_IN_MINOR_MC_MARK ||
- mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
+void Heap::IterateWeakRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
+ DCHECK(!options.contains(SkipRoot::kWeak));
v->VisitRootPointer(Root::kStringTable, nullptr,
FullObjectSlot(&roots_table()[RootIndex::kStringTable]));
v->Synchronize(VisitorSynchronization::kStringTable);
- if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE &&
- mode != VISIT_FOR_SERIALIZATION) {
+ if (!options.contains(SkipRoot::kExternalStringTable) &&
+ !options.contains(SkipRoot::kUnserializable)) {
// Scavenge collections have special processing for this.
// Do not visit for serialization, since the external string table will
// be populated from scratch upon deserialization.
@@ -4441,10 +4523,7 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
Heap* heap_;
};
-void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
- const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
- mode == VISIT_ALL_IN_MINOR_MC_MARK ||
- mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
+void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
v->VisitRootPointers(Root::kStrongRootList, nullptr,
roots_table().strong_roots_begin(),
roots_table().strong_roots_end());
@@ -4452,11 +4531,6 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
isolate_->bootstrapper()->Iterate(v);
v->Synchronize(VisitorSynchronization::kBootstrapper);
- if (mode != VISIT_ONLY_STRONG_IGNORE_STACK) {
- isolate_->Iterate(v);
- isolate_->global_handles()->IterateStrongStackRoots(v);
- v->Synchronize(VisitorSynchronization::kTop);
- }
Relocatable::Iterate(isolate_, v);
v->Synchronize(VisitorSynchronization::kRelocatable);
isolate_->debug()->Iterate(v);
@@ -4465,87 +4539,107 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
isolate_->compilation_cache()->Iterate(v);
v->Synchronize(VisitorSynchronization::kCompilationCache);
- // Iterate over local handles in handle scopes.
- FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
- isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
- isolate_->handle_scope_implementer()->Iterate(v);
-
- if (FLAG_local_heaps) {
- safepoint_->Iterate(&left_trim_visitor);
- safepoint_->Iterate(v);
- }
-
- isolate_->IterateDeferredHandles(&left_trim_visitor);
- isolate_->IterateDeferredHandles(v);
- v->Synchronize(VisitorSynchronization::kHandleScope);
-
- // Iterate over the builtin code objects in the heap. Note that it is not
- // necessary to iterate over code objects on scavenge collections.
- if (!isMinorGC) {
+ if (!options.contains(SkipRoot::kOldGeneration)) {
IterateBuiltins(v);
v->Synchronize(VisitorSynchronization::kBuiltins);
}
- // Iterate over global handles.
- switch (mode) {
- case VISIT_FOR_SERIALIZATION:
- // Global handles are not iterated by the serializer. Values referenced by
- // global handles need to be added manually.
- break;
- case VISIT_ONLY_STRONG:
- case VISIT_ONLY_STRONG_IGNORE_STACK:
- isolate_->global_handles()->IterateStrongRoots(v);
- break;
- case VISIT_ALL_IN_SCAVENGE:
- case VISIT_ALL_IN_MINOR_MC_MARK:
- isolate_->global_handles()->IterateYoungStrongAndDependentRoots(v);
- break;
- case VISIT_ALL_IN_MINOR_MC_UPDATE:
- isolate_->global_handles()->IterateAllYoungRoots(v);
- break;
- case VISIT_ALL_IN_SWEEP_NEWSPACE:
- case VISIT_ALL:
- isolate_->global_handles()->IterateAllRoots(v);
- break;
- }
- v->Synchronize(VisitorSynchronization::kGlobalHandles);
+ // Iterate over pointers being held by inactive threads.
+ isolate_->thread_manager()->Iterate(v);
+ v->Synchronize(VisitorSynchronization::kThreadManager);
+
+ // Visitors in this block only run when not serializing. These include:
+ //
+ // - Thread-local and stack.
+ // - Handles.
+ // - Microtasks.
+ // - The startup object cache.
+ //
+ // When creating a real startup snapshot, these areas are expected to be empty.
+ // It is also possible to create a snapshot of a *running* isolate for testing
+ // purposes. In this case, these areas are likely not empty and will simply be
+ // skipped.
+ //
+ // The general guideline for adding visitors to this section vs. adding them
+ // above is that non-transient heap state is always visited, transient heap
+ // state is visited only when not serializing.
+ if (!options.contains(SkipRoot::kUnserializable)) {
+ if (!options.contains(SkipRoot::kGlobalHandles)) {
+ if (options.contains(SkipRoot::kWeak)) {
+ if (options.contains(SkipRoot::kOldGeneration)) {
+ // Skip handles that are either weak or old.
+ isolate_->global_handles()->IterateYoungStrongAndDependentRoots(v);
+ } else {
+ // Skip handles that are weak.
+ isolate_->global_handles()->IterateStrongRoots(v);
+ }
+ } else {
+ // Do not skip weak handles.
+ if (options.contains(SkipRoot::kOldGeneration)) {
+ // Skip handles that are old.
+ isolate_->global_handles()->IterateAllYoungRoots(v);
+ } else {
+ // Do not skip any handles.
+ isolate_->global_handles()->IterateAllRoots(v);
+ }
+ }
+ }
+ v->Synchronize(VisitorSynchronization::kGlobalHandles);
+
+ if (!options.contains(SkipRoot::kStack)) {
+ IterateStackRoots(v);
+ v->Synchronize(VisitorSynchronization::kTop);
+ }
+
+ // Iterate over local handles in handle scopes.
+ FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
+ isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
+ isolate_->handle_scope_implementer()->Iterate(v);
+
+ if (FLAG_local_heaps) {
+ safepoint_->Iterate(&left_trim_visitor);
+ safepoint_->Iterate(v);
+ isolate_->persistent_handles_list()->Iterate(&left_trim_visitor);
+ isolate_->persistent_handles_list()->Iterate(v);
+ }
+
+ isolate_->IterateDeferredHandles(&left_trim_visitor);
+ isolate_->IterateDeferredHandles(v);
+ v->Synchronize(VisitorSynchronization::kHandleScope);
- // Iterate over eternal handles. Eternal handles are not iterated by the
- // serializer. Values referenced by eternal handles need to be added manually.
- if (mode != VISIT_FOR_SERIALIZATION) {
- if (isMinorGC) {
+ if (options.contains(SkipRoot::kOldGeneration)) {
isolate_->eternal_handles()->IterateYoungRoots(v);
} else {
isolate_->eternal_handles()->IterateAllRoots(v);
}
- }
- v->Synchronize(VisitorSynchronization::kEternalHandles);
+ v->Synchronize(VisitorSynchronization::kEternalHandles);
- // Iterate over pointers being held by inactive threads.
- isolate_->thread_manager()->Iterate(v);
- v->Synchronize(VisitorSynchronization::kThreadManager);
+ // Iterate over pending Microtasks stored in MicrotaskQueues.
+ MicrotaskQueue* default_microtask_queue =
+ isolate_->default_microtask_queue();
+ if (default_microtask_queue) {
+ MicrotaskQueue* microtask_queue = default_microtask_queue;
+ do {
+ microtask_queue->IterateMicrotasks(v);
+ microtask_queue = microtask_queue->next();
+ } while (microtask_queue != default_microtask_queue);
+ }
- // Iterate over other strong roots (currently only identity maps).
- for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
- v->VisitRootPointers(Root::kStrongRoots, nullptr, list->start, list->end);
- }
- v->Synchronize(VisitorSynchronization::kStrongRoots);
+ // Iterate over other strong roots (currently only identity maps and
+ // deoptimization entries).
+ for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
+ v->VisitRootPointers(Root::kStrongRoots, nullptr, list->start, list->end);
+ }
+ v->Synchronize(VisitorSynchronization::kStrongRoots);
- // Iterate over pending Microtasks stored in MicrotaskQueues.
- MicrotaskQueue* default_microtask_queue = isolate_->default_microtask_queue();
- if (default_microtask_queue) {
- MicrotaskQueue* microtask_queue = default_microtask_queue;
- do {
- microtask_queue->IterateMicrotasks(v);
- microtask_queue = microtask_queue->next();
- } while (microtask_queue != default_microtask_queue);
+ // Iterate over the startup object cache unless serializing or
+ // deserializing.
+ SerializerDeserializer::Iterate(isolate_, v);
+ v->Synchronize(VisitorSynchronization::kStartupObjectCache);
}
- // Iterate over the partial snapshot cache unless serializing or
- // deserializing.
- if (mode != VISIT_FOR_SERIALIZATION) {
- SerializerDeserializer::Iterate(isolate_, v);
- v->Synchronize(VisitorSynchronization::kPartialSnapshotCache);
+ if (!options.contains(SkipRoot::kWeak)) {
+ IterateWeakRoots(v, options);
}
}
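// Editorial sketch (not part of this change): with the VisitMode parameter
// gone, callers pass the set of root categories to skip instead; an empty set,
// as in the updated Verify() and LeftTrimFixedArray() call sites, visits
// everything. The helper name below is hypothetical.
void VisitStrongRootsIgnoringStack(Heap* heap, RootVisitor* v) {
  base::EnumSet<SkipRoot> options;
  options.Add(SkipRoot::kWeak);   // skip weak roots such as the string table
  options.Add(SkipRoot::kStack);  // do not scan the execution stack
  heap->IterateRoots(v, options);
}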
@@ -4563,6 +4657,11 @@ void Heap::IterateBuiltins(RootVisitor* v) {
STATIC_ASSERT(Builtins::AllBuiltinsAreIsolateIndependent());
}
+void Heap::IterateStackRoots(RootVisitor* v) {
+ isolate_->Iterate(v);
+ isolate_->global_handles()->IterateStrongStackRoots(v);
+}
+
namespace {
size_t GlobalMemorySizeFromV8Size(size_t v8_size) {
const size_t kGlobalMemoryToV8Ratio = 2;
@@ -4866,10 +4965,13 @@ bool Heap::ShouldOptimizeForLoadTime() {
// major GC. It happens when the old generation allocation limit is reached and
// - either we need to optimize for memory usage,
// - or the incremental marking is not in progress and we cannot start it.
-bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
+bool Heap::ShouldExpandOldGenerationOnSlowAllocation(LocalHeap* local_heap) {
if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
// We reached the old generation allocation limit.
+ // Ensure that retry of allocation on background thread succeeds
+ if (IsRetryOfFailedAllocation(local_heap)) return true;
+
if (ShouldOptimizeForMemoryUsage()) return false;
if (ShouldOptimizeForLoadTime()) return true;
@@ -4886,6 +4988,11 @@ bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
return true;
}
+bool Heap::IsRetryOfFailedAllocation(LocalHeap* local_heap) {
+ if (!local_heap) return false;
+ return local_heap->allocation_failed_;
+}
+
Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
if (ShouldReduceMemory() || FLAG_stress_compaction) {
return Heap::HeapGrowingMode::kMinimal;
@@ -4910,6 +5017,22 @@ size_t Heap::GlobalMemoryAvailable() {
: new_space_->Capacity() + 1;
}
+double Heap::PercentToOldGenerationLimit() {
+ double size_at_gc = old_generation_size_at_last_gc_;
+ double size_now = OldGenerationObjectsAndPromotedExternalMemorySize();
+ double current_bytes = size_now - size_at_gc;
+ double total_bytes = old_generation_allocation_limit_ - size_at_gc;
+ return total_bytes > 0 ? (current_bytes / total_bytes) * 100.0 : 0;
+}
+
+double Heap::PercentToGlobalMemoryLimit() {
+ double size_at_gc = old_generation_size_at_last_gc_;
+ double size_now = OldGenerationObjectsAndPromotedExternalMemorySize();
+ double current_bytes = size_now - size_at_gc;
+ double total_bytes = old_generation_allocation_limit_ - size_at_gc;
+ return total_bytes > 0 ? (current_bytes / total_bytes) * 100.0 : 0;
+}
+
// This function returns either kNoLimit, kSoftLimit, or kHardLimit.
// The kNoLimit means that either incremental marking is disabled or it is too
// early to start incremental marking.
@@ -4937,37 +5060,42 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
}
if (FLAG_stress_marking > 0) {
- double gained_since_last_gc =
- PromotedSinceLastGC() +
- (isolate()->isolate_data()->external_memory_ -
- isolate()->isolate_data()->external_memory_low_since_mark_compact_);
- double size_before_gc =
- OldGenerationObjectsAndPromotedExternalMemorySize() -
- gained_since_last_gc;
- double bytes_to_limit = old_generation_allocation_limit_ - size_before_gc;
- if (bytes_to_limit > 0) {
- double current_percent = (gained_since_last_gc / bytes_to_limit) * 100.0;
-
+ int current_percent = static_cast<int>(
+ std::max(PercentToOldGenerationLimit(), PercentToGlobalMemoryLimit()));
+ if (current_percent > 0) {
if (FLAG_trace_stress_marking) {
isolate()->PrintWithTimestamp(
- "[IncrementalMarking] %.2lf%% of the memory limit reached\n",
+ "[IncrementalMarking] %d%% of the memory limit reached\n",
current_percent);
}
-
if (FLAG_fuzzer_gc_analysis) {
// Skips values >=100% since they already trigger marking.
- if (current_percent < 100.0) {
+ if (current_percent < 100) {
max_marking_limit_reached_ =
- std::max(max_marking_limit_reached_, current_percent);
+ std::max<double>(max_marking_limit_reached_, current_percent);
}
- } else if (static_cast<int>(current_percent) >=
- stress_marking_percentage_) {
+ } else if (current_percent >= stress_marking_percentage_) {
stress_marking_percentage_ = NextStressMarkingLimit();
return IncrementalMarkingLimit::kHardLimit;
}
}
}
+ if (FLAG_incremental_marking_soft_trigger > 0 ||
+ FLAG_incremental_marking_hard_trigger > 0) {
+ int current_percent = static_cast<int>(
+ std::max(PercentToOldGenerationLimit(), PercentToGlobalMemoryLimit()));
+ if (current_percent > FLAG_incremental_marking_hard_trigger &&
+ FLAG_incremental_marking_hard_trigger > 0) {
+ return IncrementalMarkingLimit::kHardLimit;
+ }
+ if (current_percent > FLAG_incremental_marking_soft_trigger &&
+ FLAG_incremental_marking_soft_trigger > 0) {
+ return IncrementalMarkingLimit::kSoftLimit;
+ }
+ return IncrementalMarkingLimit::kNoLimit;
+ }
+
size_t old_generation_space_available = OldGenerationSpaceAvailable();
const size_t global_memory_available = GlobalMemoryAvailable();
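// Editorial note (worked example with illustrative numbers): if
// old_generation_size_at_last_gc_ is 100 MB, the current
// OldGenerationObjectsAndPromotedExternalMemorySize() is 160 MB, and
// old_generation_allocation_limit_ is 180 MB, then
// PercentToOldGenerationLimit() returns (160 - 100) / (180 - 100) * 100 = 75.
// With FLAG_incremental_marking_soft_trigger = 50 and
// FLAG_incremental_marking_hard_trigger = 80, the branch added above returns
// kSoftLimit, since 75 exceeds the soft trigger but not the hard one.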
@@ -5173,6 +5301,12 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
space_[RO_SPACE] = read_only_space_ = ro_heap->read_only_space();
}
+void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
+ CHECK(V8_SHARED_RO_HEAP_BOOL);
+ delete read_only_space_;
+ space_[RO_SPACE] = read_only_space_ = space;
+}
+
void Heap::SetUpSpaces() {
// Ensure SetUpFromReadOnlyHeap has been run.
DCHECK_NOT_NULL(read_only_space_);
@@ -5322,20 +5456,6 @@ void Heap::NotifyOldGenerationExpansion() {
}
}
-void Heap::NotifyOffThreadSpaceMerged() {
- // TODO(leszeks): Ideally we would do this check during off-thread page
- // allocation too, to proactively do GC. We should also probably do this check
- // before merging rather than after.
- if (!CanExpandOldGeneration(0)) {
- // TODO(leszeks): We should try to invoke the near-heap limit callback and
- // do a last-resort GC first.
- FatalProcessOutOfMemory("Failed to merge off-thread pages into heap.");
- }
- StartIncrementalMarkingIfAllocationLimitIsReached(
- GCFlagsForIncrementalMarking(), kGCCallbackScheduleIdleGarbageCollection);
- NotifyOldGenerationExpansion();
-}
-
void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
local_embedder_heap_tracer()->SetRemoteTracer(tracer);
@@ -5346,8 +5466,11 @@ EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
}
EmbedderHeapTracer::TraceFlags Heap::flags_for_embedder_tracer() const {
- if (ShouldReduceMemory())
+ if (is_current_gc_forced()) {
+ return EmbedderHeapTracer::TraceFlags::kForced;
+ } else if (ShouldReduceMemory()) {
return EmbedderHeapTracer::TraceFlags::kReduceMemory;
+ }
return EmbedderHeapTracer::TraceFlags::kNoFlags;
}
@@ -5376,6 +5499,7 @@ void Heap::StartTearDown() {
// a good time to run heap verification (if requested), before starting to
// tear down parts of the Isolate.
if (FLAG_verify_heap) {
+ SafepointScope scope(this);
Verify();
}
#endif
@@ -5590,11 +5714,11 @@ void Heap::CompactWeakArrayLists(AllocationType allocation) {
set_script_list(*scripts);
}
-void Heap::AddRetainedMap(Handle<Map> map) {
+void Heap::AddRetainedMap(Handle<NativeContext> context, Handle<Map> map) {
if (map->is_in_retained_map_list()) {
return;
}
- Handle<WeakArrayList> array(retained_maps(), isolate());
+ Handle<WeakArrayList> array(context->retained_maps(), isolate());
if (array->IsFull()) {
CompactRetainedMaps(*array);
}
@@ -5603,17 +5727,15 @@ void Heap::AddRetainedMap(Handle<Map> map) {
array = WeakArrayList::AddToEnd(
isolate(), array,
MaybeObjectHandle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()));
- if (*array != retained_maps()) {
- set_retained_maps(*array);
+ if (*array != context->retained_maps()) {
+ context->set_retained_maps(*array);
}
map->set_is_in_retained_map_list(true);
}
void Heap::CompactRetainedMaps(WeakArrayList retained_maps) {
- DCHECK_EQ(retained_maps, this->retained_maps());
int length = retained_maps.length();
int new_length = 0;
- int new_number_of_disposed_maps = 0;
// This loop compacts the array by removing cleared weak cells.
for (int i = 0; i < length; i += 2) {
MaybeObject maybe_object = retained_maps.Get(i);
@@ -5629,12 +5751,8 @@ void Heap::CompactRetainedMaps(WeakArrayList retained_maps) {
retained_maps.Set(new_length, maybe_object);
retained_maps.Set(new_length + 1, age);
}
- if (i < number_of_disposed_maps_) {
- new_number_of_disposed_maps += 2;
- }
new_length += 2;
}
- number_of_disposed_maps_ = new_number_of_disposed_maps;
HeapObject undefined = ReadOnlyRoots(this).undefined_value();
for (int i = new_length; i < length; i++) {
retained_maps.Set(i, HeapObjectReference::Strong(undefined));
@@ -5722,6 +5840,15 @@ void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
page->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
#endif
}
+
+void Heap::VerifySlotRangeHasNoRecordedSlots(Address start, Address end) {
+#ifndef V8_DISABLE_WRITE_BARRIERS
+ Page* page = Page::FromAddress(start);
+ DCHECK(!page->IsLargePage());
+ DCHECK(!page->InYoungGeneration());
+ RememberedSet<OLD_TO_NEW>::CheckNoneInRange(page, start, end);
+#endif
+}
#endif
void Heap::ClearRecordedSlotRange(Address start, Address end) {
@@ -5875,7 +6002,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
void MarkReachableObjects() {
MarkingVisitor visitor(this);
- heap_->IterateRoots(&visitor, VISIT_ALL);
+ heap_->IterateRoots(&visitor, {});
visitor.TransitiveClosure();
}
@@ -6071,13 +6198,16 @@ void Heap::SetBuiltinsConstantsTable(FixedArray cache) {
set_builtins_constants_table(cache);
}
+void Heap::SetDetachedContexts(WeakArrayList detached_contexts) {
+ set_detached_contexts(detached_contexts);
+}
+
void Heap::SetInterpreterEntryTrampolineForProfiling(Code code) {
DCHECK_EQ(Builtins::kInterpreterEntryTrampoline, code.builtin_index());
set_interpreter_entry_trampoline_for_profiling(code);
}
void Heap::PostFinalizationRegistryCleanupTaskIfNeeded() {
- DCHECK(!isolate()->host_cleanup_finalization_group_callback());
// Only one cleanup task is posted at a time.
if (!HasDirtyJSFinalizationRegistries() ||
is_finalization_registry_cleanup_task_posted_) {
@@ -6109,10 +6239,9 @@ void Heap::EnqueueDirtyJSFinalizationRegistry(
JSFinalizationRegistry tail = JSFinalizationRegistry::cast(
dirty_js_finalization_registries_list_tail());
tail.set_next_dirty(finalization_registry);
- gc_notify_updated_slot(tail,
- finalization_registry.RawField(
- JSFinalizationRegistry::kNextDirtyOffset),
- finalization_registry);
+ gc_notify_updated_slot(
+ tail, tail.RawField(JSFinalizationRegistry::kNextDirtyOffset),
+ finalization_registry);
}
set_dirty_js_finalization_registries_list_tail(finalization_registry);
// dirty_js_finalization_registries_list_tail_ is rescanned by
@@ -6138,7 +6267,6 @@ MaybeHandle<JSFinalizationRegistry> Heap::DequeueDirtyJSFinalizationRegistry() {
void Heap::RemoveDirtyFinalizationRegistriesOnContext(NativeContext context) {
if (!FLAG_harmony_weak_refs) return;
- if (isolate()->host_cleanup_finalization_group_callback()) return;
DisallowHeapAllocation no_gc;
@@ -6252,6 +6380,17 @@ std::vector<Handle<NativeContext>> Heap::FindAllNativeContexts() {
return result;
}
+std::vector<WeakArrayList> Heap::FindAllRetainedMaps() {
+ std::vector<WeakArrayList> result;
+ Object context = native_contexts_list();
+ while (!context.IsUndefined(isolate())) {
+ NativeContext native_context = NativeContext::cast(context);
+ result.push_back(native_context.retained_maps());
+ context = native_context.next_context_link();
+ }
+ return result;
+}
+
size_t Heap::NumberOfDetachedContexts() {
// The detached_contexts() array has two entries per detached context.
return detached_contexts().length() / 2;
@@ -6609,12 +6748,11 @@ void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
addr = rinfo->constant_pool_entry_address();
if (RelocInfo::IsCodeTargetMode(rmode)) {
slot_type = CODE_ENTRY_SLOT;
+ } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
+ slot_type = COMPRESSED_OBJECT_SLOT;
} else {
- // Constant pools don't currently support compressed objects, as
- // their values are all pointer sized (though this could change
- // therefore we have a DCHECK).
DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
- slot_type = OBJECT_SLOT;
+ slot_type = FULL_OBJECT_SLOT;
}
}
uintptr_t offset = addr - source_page->address();