github.com/nodejs/node.git
Diffstat (limited to 'deps/v8/src/heap/store-buffer.cc')
-rw-r--r--  deps/v8/src/heap/store-buffer.cc  71 +++++++++++++++++++++++-------
1 file changed, 60 insertions(+), 11 deletions(-)
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index d73e3235c15..b428a82046c 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -10,6 +10,7 @@
 #include "src/base/template-utils.h"
 #include "src/counters.h"
 #include "src/heap/incremental-marking.h"
+#include "src/heap/store-buffer-inl.h"
 #include "src/isolate.h"
 #include "src/objects-inl.h"
 #include "src/v8.h"
@@ -30,22 +31,28 @@ StoreBuffer::StoreBuffer(Heap* heap)
 }
 
 void StoreBuffer::SetUp() {
-  // Allocate 3x the buffer size, so that we can start the new store buffer
-  // aligned to 2x the size. This lets us use a bit test to detect the end of
-  // the area.
+  const size_t requested_size = kStoreBufferSize * kStoreBuffers;
+  // Allocate buffer memory aligned at least to kStoreBufferSize. This lets us
+  // use a bit test to detect the ends of the buffers.
+  const size_t alignment =
+      std::max<size_t>(kStoreBufferSize, AllocatePageSize());
+  void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
   VirtualMemory reservation;
-  if (!AllocVirtualMemory(kStoreBufferSize * 3, heap_->GetRandomMmapAddr(),
-                          &reservation)) {
+  if (!AlignedAllocVirtualMemory(requested_size, alignment, hint,
+                                 &reservation)) {
     heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
   }
+
   Address start = reservation.address();
-  start_[0] = reinterpret_cast<Address*>(::RoundUp(start, kStoreBufferSize));
+  const size_t allocated_size = reservation.size();
+
+  start_[0] = reinterpret_cast<Address*>(start);
   limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize);
   start_[1] = limit_[0];
   limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize);
-  Address* vm_limit = reinterpret_cast<Address*>(start + reservation.size());
-
+  // Sanity check the buffers.
+  Address* vm_limit = reinterpret_cast<Address*>(start + allocated_size);
   USE(vm_limit);
   for (int i = 0; i < kStoreBuffers; i++) {
     DCHECK(reinterpret_cast<Address>(start_[i]) >= reservation.address());
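
The kStoreBufferSize alignment requested above is what turns the end-of-buffer
check into a single bit test: when a buffer starts on a kStoreBufferSize
boundary, the low bits of a pointer into it are exactly the offset from the
buffer's start. A minimal standalone sketch of the idea (the constant values
here are assumptions, not V8's actual configuration):

    #include <cstdint>

    constexpr uintptr_t kStoreBufferSize = uintptr_t{1} << 18;  // assumed power of two
    constexpr uintptr_t kStoreBufferMask = kStoreBufferSize - 1;

    // With the buffer start aligned to kStoreBufferSize, `top` sits on a
    // buffer boundary exactly when its low bits are all zero, so one AND
    // replaces a compare against a separately stored limit.
    bool AtBufferBoundary(uintptr_t top) {
      return (top & kStoreBufferMask) == 0;
    }
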
@@ -55,8 +62,9 @@ void StoreBuffer::SetUp() {
     DCHECK_EQ(0, reinterpret_cast<Address>(limit_[i]) & kStoreBufferMask);
   }
 
-  if (!reservation.SetPermissions(reinterpret_cast<Address>(start_[0]),
-                                  kStoreBufferSize * kStoreBuffers,
+  // Set RW permissions only on the pages we use.
+  const size_t used_size = RoundUp(requested_size, CommitPageSize());
+  if (!reservation.SetPermissions(start, used_size,
                                   PageAllocator::kReadWrite)) {
     heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
   }
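
Committing permissions at CommitPageSize() granularity means the read-write
region has to be rounded up to a whole number of commit pages, which is what
the RoundUp call above does. A standalone sketch of the same power-of-two
round-up (the name here is illustrative, not V8's helper):

    #include <cstddef>

    // Round `value` up to the next multiple of `granularity`, which must be
    // a power of two. E.g. RoundUpPow2(5000, 4096) == 8192.
    constexpr size_t RoundUpPow2(size_t value, size_t granularity) {
      return (value + granularity - 1) & ~(granularity - 1);
    }
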
@@ -65,7 +73,6 @@ void StoreBuffer::SetUp() {
   virtual_memory_.TakeControl(&reservation);
 }
 
-
 void StoreBuffer::TearDown() {
   if (virtual_memory_.IsReserved()) virtual_memory_.Free();
   top_ = nullptr;
@@ -76,6 +83,48 @@ void StoreBuffer::TearDown() {
   }
 }
 
+void StoreBuffer::DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
+                                      Address end) {
+  DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
+  store_buffer->InsertDeletionIntoStoreBuffer(start, end);
+}
+
+void StoreBuffer::InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
+  DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
+  store_buffer->InsertIntoStoreBuffer(slot);
+}
+
+void StoreBuffer::DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
+                                                Address start, Address end) {
+  // During GC the store buffer has to be empty at all times.
+  DCHECK(store_buffer->Empty());
+  DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
+  Page* page = Page::FromAddress(start);
+  if (end) {
+    RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
+                                           SlotSet::PREFREE_EMPTY_BUCKETS);
+  } else {
+    RememberedSet<OLD_TO_NEW>::Remove(page, start);
+  }
+}
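
The deletion callback overloads one entry point for two cases: a null `end`
removes a single recorded slot, while a non-null `end` removes every slot in
[start, end). A standalone sketch of that convention (simplified types, not
V8's remembered set):

    #include <cstdint>
    #include <set>

    using Address = uintptr_t;

    void RemoveSlots(std::set<Address>& slots, Address start, Address end) {
      if (end) {
        // Range mode: drop every recorded slot in [start, end).
        slots.erase(slots.lower_bound(start), slots.lower_bound(end));
      } else {
        // Single-slot mode.
        slots.erase(start);
      }
    }
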
+
+void StoreBuffer::InsertDuringGarbageCollection(StoreBuffer* store_buffer,
+                                                Address slot) {
+  DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
+  RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+}
+
+void StoreBuffer::SetMode(StoreBufferMode mode) {
+  mode_ = mode;
+  if (mode == NOT_IN_GC) {
+    insertion_callback = &InsertDuringRuntime;
+    deletion_callback = &DeleteDuringRuntime;
+  } else {
+    insertion_callback = &InsertDuringGarbageCollection;
+    deletion_callback = &DeleteDuringGarbageCollection;
+  }
+}
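
SetMode swaps function pointers once per mode change, so the hot insertion and
deletion paths never branch on mode_. It also suggests why the callbacks are
static and take the StoreBuffer* explicitly: that shape lets them be installed
as plain function pointers. A standalone sketch of the pattern (simplified
types and assumed names, not V8 code):

    #include <cstdint>
    #include <cstdio>

    using Address = uintptr_t;
    struct Buffer;
    using InsertFn = void (*)(Buffer*, Address);

    struct Buffer {
      InsertFn insertion_callback;
    };

    void InsertDuringRuntime(Buffer*, Address slot) {
      std::printf("buffer slot 0x%zx for a later pass\n",
                  static_cast<size_t>(slot));
    }

    void InsertDuringGC(Buffer*, Address slot) {
      std::printf("update remembered set for 0x%zx directly\n",
                  static_cast<size_t>(slot));
    }

    int main() {
      Buffer buffer{&InsertDuringRuntime};          // runtime mode
      buffer.insertion_callback(&buffer, 0x1000);
      buffer.insertion_callback = &InsertDuringGC;  // entering GC
      buffer.insertion_callback(&buffer, 0x2000);
    }
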
+
 int StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
   isolate->heap()->store_buffer()->FlipStoreBuffers();
   isolate->counters()->store_buffer_overflows()->Increment();
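
On overflow the store buffer is flipped rather than grown: writes continue in
the other of the two fixed-size lanes while the full one is drained. A
standalone sketch of that double-buffering shape (sizes and names are
assumptions, not V8's):

    #include <cstddef>

    constexpr int kStoreBuffers = 2;
    constexpr size_t kSlotsPerLane = 4;  // tiny, for illustration

    struct TwoLaneLog {
      size_t lane[kStoreBuffers][kSlotsPerLane];
      size_t used[kStoreBuffers] = {0, 0};
      int current = 0;

      void Flip() {
        // In the real code the full lane would be handed off for draining
        // here; this sketch just switches to the other lane and reuses it.
        current = 1 - current;
        used[current] = 0;
      }

      void Insert(size_t slot) {
        if (used[current] == kSlotsPerLane) Flip();  // overflow: switch lanes
        lane[current][used[current]++] = slot;
      }
    };
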