github.com/nodejs/node.git
Diffstat (limited to 'deps/v8/src/heap/memory-chunk.cc')
-rw-r--r--  deps/v8/src/heap/memory-chunk.cc  157
1 file changed, 157 insertions(+), 0 deletions(-)
diff --git a/deps/v8/src/heap/memory-chunk.cc b/deps/v8/src/heap/memory-chunk.cc
new file mode 100644
index 00000000000..865e6f1a72b
--- /dev/null
+++ b/deps/v8/src/heap/memory-chunk.cc
@@ -0,0 +1,157 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/memory-chunk.h"
+
+#include "src/heap/memory-chunk-inl.h"
+#include "src/heap/spaces.h"
+
+namespace v8 {
+namespace internal {
+
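+// ComputeDiscardMemoryArea is defined in MemoryAllocator (not shown in this
+// diff); it is expected to shrink [addr, addr + size) inward to commit-page
+// boundaries, so the resulting region may be empty. The size() check below
+// therefore guards the DiscardSystemPages call, which operates on whole,
+// page-aligned regions.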
+void MemoryChunk::DiscardUnusedMemory(Address addr, size_t size) {
+  base::AddressRegion memory_area =
+      MemoryAllocator::ComputeDiscardMemoryArea(addr, size);
+  if (memory_area.size() != 0) {
+    MemoryAllocator* memory_allocator = heap_->memory_allocator();
+    v8::PageAllocator* page_allocator =
+        memory_allocator->page_allocator(executable());
+    CHECK(page_allocator->DiscardSystemPages(
+        reinterpret_cast<void*>(memory_area.begin()), memory_area.size()));
+  }
+}
+
+size_t MemoryChunkLayout::CodePageGuardStartOffset() {
+  // We are guarding code pages: the first OS page after the header
+  // will be protected as non-writable.
+  return ::RoundUp(Page::kHeaderSize, MemoryAllocator::GetCommitPageSize());
+}
+
+size_t MemoryChunkLayout::CodePageGuardSize() {
+  return MemoryAllocator::GetCommitPageSize();
+}
+
+intptr_t MemoryChunkLayout::ObjectStartOffsetInCodePage() {
+  // We are guarding code pages: the first OS page after the header
+  // will be protected as non-writable.
+  return CodePageGuardStartOffset() + CodePageGuardSize();
+}
+
+intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
+  // We are guarding code pages: the last OS page will be protected as
+  // non-writable.
+  return Page::kPageSize -
+         static_cast<int>(MemoryAllocator::GetCommitPageSize());
+}
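+
+// Taken together, the four functions above imply the following code page
+// layout. The offsets are illustrative only, assuming a 4 KiB commit page
+// size and a header no larger than one OS page:
+//
+//   [0, 4096)                      chunk header, rounded up to a full page
+//   [4096, 8192)                   leading guard page, non-writable
+//   [8192, kPageSize - 4096)       allocatable object area
+//   [kPageSize - 4096, kPageSize)  trailing guard page, non-writable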
+
+size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
+  size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
+  DCHECK_LE(kMaxRegularHeapObjectSize, memory);
+  return memory;
+}
+
+intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
+  return RoundUp(MemoryChunk::kHeaderSize, kTaggedSize);
+}
+
+size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
+    AllocationSpace space) {
+  if (space == CODE_SPACE) {
+    return ObjectStartOffsetInCodePage();
+  }
+  return ObjectStartOffsetInDataPage();
+}
+
+size_t MemoryChunkLayout::AllocatableMemoryInDataPage() {
+  size_t memory = MemoryChunk::kPageSize - ObjectStartOffsetInDataPage();
+  DCHECK_LE(kMaxRegularHeapObjectSize, memory);
+  return memory;
+}
+
+size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
+    AllocationSpace space) {
+  if (space == CODE_SPACE) {
+    return AllocatableMemoryInCodePage();
+  }
+  return AllocatableMemoryInDataPage();
+}
+
+#ifdef THREAD_SANITIZER
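+// This acquire load pairs with the Release_Store of heap_ in
+// InitializationMemoryFence() below: once the stored value is observed,
+// all initializing writes to the chunk are visible to this thread.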
+void MemoryChunk::SynchronizedHeapLoad() {
+  CHECK(reinterpret_cast<Heap*>(base::Acquire_Load(
+            reinterpret_cast<base::AtomicWord*>(&heap_))) != nullptr ||
+        InReadOnlySpace());
+}
+#endif
+
+void MemoryChunk::InitializationMemoryFence() {
+  base::SeqCst_MemoryFence();
+#ifdef THREAD_SANITIZER
+  // Since TSAN does not process memory fences, we use the following annotation
+  // to tell TSAN that there is no data race when emitting an
+  // InitializationMemoryFence. Note that the other thread still needs to
+  // perform MemoryChunk::SynchronizedHeapLoad().
+  base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
+                      reinterpret_cast<base::AtomicWord>(heap_));
+#endif
+}
+
+void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
+    PageAllocator::Permission permission) {
+  DCHECK(permission == PageAllocator::kRead ||
+         permission == PageAllocator::kReadExecute);
+  DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+  DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
+  // Decrementing the write_unprotect_counter_ and changing the page
+  // protection mode has to be atomic.
+  base::MutexGuard guard(page_protection_change_mutex_);
+  if (write_unprotect_counter_ == 0) {
+    // This is a corner case that may happen when we have a
+    // CodeSpaceMemoryModificationScope open and this page was newly
+    // added.
+    return;
+  }
+  write_unprotect_counter_--;
+  DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
+  if (write_unprotect_counter_ == 0) {
+    Address protect_start =
+        address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
+    size_t page_size = MemoryAllocator::GetCommitPageSize();
+    DCHECK(IsAligned(protect_start, page_size));
+    size_t protect_size = RoundUp(area_size(), page_size);
+    CHECK(reservation_.SetPermissions(protect_start, protect_size, permission));
+  }
+}
+
+void MemoryChunk::SetReadable() {
+  DecrementWriteUnprotectCounterAndMaybeSetPermissions(PageAllocator::kRead);
+}
+
+void MemoryChunk::SetReadAndExecutable() {
+  DCHECK(!FLAG_jitless);
+  DecrementWriteUnprotectCounterAndMaybeSetPermissions(
+      PageAllocator::kReadExecute);
+}
+
+void MemoryChunk::SetReadAndWritable() {
+  DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+  DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
+  // Incrementing the write_unprotect_counter_ and changing the page
+  // protection mode has to be atomic.
+  base::MutexGuard guard(page_protection_change_mutex_);
+  write_unprotect_counter_++;
+  DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
+  if (write_unprotect_counter_ == 1) {
+    Address unprotect_start =
+        address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
+    size_t page_size = MemoryAllocator::GetCommitPageSize();
+    DCHECK(IsAligned(unprotect_start, page_size));
+    size_t unprotect_size = RoundUp(area_size(), page_size);
+    CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
+                                      PageAllocator::kReadWrite));
+  }
+}
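+
+// A sketch of the counter protocol implied by the code above (the scope
+// class is the one already named in the corner-case comment in
+// DecrementWriteUnprotectCounterAndMaybeSetPermissions); calls may nest,
+// and permissions only flip at the 0 <-> 1 transitions:
+//
+//   chunk->SetReadAndWritable();    // 0 -> 1: pages become read+write
+//   chunk->SetReadAndWritable();    // 1 -> 2: nested, no permission change
+//   /* ... patch code objects on the page ... */
+//   chunk->SetReadAndExecutable();  // 2 -> 1: still writable
+//   chunk->SetReadAndExecutable();  // 1 -> 0: pages become read+execute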
+
+} // namespace internal
+} // namespace v8