github.com/nodejs/node.git
Diffstat (limited to 'deps/v8/src/arm/assembler-arm.cc')
 deps/v8/src/arm/assembler-arm.cc (-rw-r--r--) | 144
 1 file changed, 41 insertions(+), 103 deletions(-)
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 576bcb30f60..163fa4c2192 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -485,7 +485,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
break;
}
Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
- Memory::Address_at(constant_pool_entry_address(pc, 0 /* unused */)) =
+ Memory<Address>(constant_pool_entry_address(pc, 0 /* unused */)) =
object.address();
}
}
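
Note: the hunk above switches the constant-pool patching from the old Memory::Address_at accessor to the templated Memory<Address> form. As a rough illustration of what such an accessor amounts to (a standalone sketch, not V8's actual definition), it is a typed lvalue over a raw address in the code buffer, so assigning through it writes the requested heap object's address into the slot computed by constant_pool_entry_address():

#include <cstdint>

using Address = uintptr_t;  // stand-in for V8's Address type

// Illustrative only: reinterpret a raw buffer address as a T slot so that
// "Memory<Address>(addr) = value" patches that slot in place.
template <typename T>
T& Memory(Address addr) {
  return *reinterpret_cast<T*>(addr);
}
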
@@ -2058,6 +2058,13 @@ void Assembler::rbit(Register dst, Register src, Condition cond) {
emit(cond | 0x6FF * B16 | dst.code() * B12 | 0xF3 * B4 | src.code());
}
+void Assembler::rev(Register dst, Register src, Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.144.
+ // cond(31-28) | 011010111111(27-16) | Rd(15-12) | 11110011(11-4) | Rm(3-0)
+ DCHECK(dst != pc);
+ DCHECK(src != pc);
+ emit(cond | 0x6BF * B16 | dst.code() * B12 | 0xF3 * B4 | src.code());
+}
// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
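
Note: as a sanity check on the new rev() emitter (illustrative, not part of the patch), the bit arithmetic of its emit() call can be reproduced for a concrete case, assuming the usual B4/B12/B16 constants are 1 << 4, 1 << 12, 1 << 16:

#include <cstdint>

constexpr uint32_t B4 = 1u << 4, B12 = 1u << 12, B16 = 1u << 16;
constexpr uint32_t al = 0xEu << 28;  // "always" condition

// rev r0, r1 with the al condition: dst.code() == 0, src.code() == 1.
constexpr uint32_t kRevR0R1 = al | 0x6BF * B16 | 0 * B12 | 0xF3 * B4 | 1;
static_assert(kRevR0R1 == 0xE6BF0F31,
              "matches the REV encoding in ARM DDI 0406C.b, A8.8.144");
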
@@ -2233,6 +2240,30 @@ void Assembler::strexh(Register src1, Register src2, Register dst,
0xF9 * B4 | src2.code());
}
+void Assembler::ldrexd(Register dst1, Register dst2, Register src,
+ Condition cond) {
+ // cond(31-28) | 00011011(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
+ DCHECK(dst1 != lr); // r14.
+ // The pair of destination registers is restricted to being an even-numbered
+ // register and the odd-numbered register that immediately follows it.
+ DCHECK_EQ(0, dst1.code() % 2);
+ DCHECK_EQ(dst1.code() + 1, dst2.code());
+ emit(cond | B24 | B23 | B21 | B20 | src.code() * B16 | dst1.code() * B12 |
+ 0xF9F);
+}
+
+void Assembler::strexd(Register res, Register src1, Register src2, Register dst,
+ Condition cond) {
+ // cond(31-28) | 00011010(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) | Rt(3-0)
+ DCHECK(src1 != lr); // r14.
+ // The pair of source registers is restricted to being an even-numbered
+ // register and the odd-numbered register that immediately follows it.
+ DCHECK_EQ(0, src1.code() % 2);
+ DCHECK_EQ(src1.code() + 1, src2.code());
+ emit(cond | B24 | B23 | B21 | dst.code() * B16 | res.code() * B12 |
+ 0xF9 * B4 | src1.code());
+}
+
// Preload instructions.
void Assembler::pld(const MemOperand& address) {
// Instruction details available in ARM DDI 0406C.b, A8.8.128.
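
Note: the new ldrexd()/strexd() emitters above enforce the architectural requirement that the transferred pair be an even-numbered register and its odd successor. As an illustrative cross-check (not part of the patch), their emit() arithmetic reproduces the expected LDREXD/STREXD instruction words for a concrete register choice, again assuming Bn == 1 << n:

#include <cstdint>

constexpr uint32_t B4 = 1u << 4, B12 = 1u << 12, B16 = 1u << 16;
constexpr uint32_t B20 = 1u << 20, B21 = 1u << 21, B23 = 1u << 23, B24 = 1u << 24;
constexpr uint32_t al = 0xEu << 28;

// ldrexd r0, r1, [r2]: dst1 = r0, dst2 = r1, src = r2.
constexpr uint32_t kLdrexd =
    al | B24 | B23 | B21 | B20 | 2 * B16 | 0 * B12 | 0xF9F;
static_assert(kLdrexd == 0xE1B20F9F, "LDREXD r0, r1, [r2]");

// strexd r3, r0, r1, [r2]: res = r3, src1 = r0, src2 = r1, dst = r2.
constexpr uint32_t kStrexd =
    al | B24 | B23 | B21 | 2 * B16 | 3 * B12 | 0xF9 * B4 | 0;
static_assert(kStrexd == 0xE1A23F90, "STREXD r3, r0, r1, [r2]");
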
@@ -2827,25 +2858,6 @@ void Assembler::vmov(const DwVfpRegister dst, Double imm,
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
- } else if (CpuFeatures::IsSupported(ARMv7) && FLAG_enable_vldr_imm) {
- CpuFeatureScope scope(this, ARMv7);
- // TODO(jfb) Temporarily turned off until we have constant blinding or
- // some equivalent mitigation: an attacker can otherwise control
- // generated data which also happens to be executable, a Very Bad
- // Thing indeed.
- // Blinding gets tricky because we don't have xor, we probably
- // need to add/subtract without losing precision, which requires a
- // cookie value that Lithium is probably better positioned to
- // choose.
- // We could also add a few peepholes here like detecting 0.0 and
- // -0.0 and doing a vmov from the sequestered d14, forcing denorms
- // to zero (we set flush-to-zero), and normalizing NaN values.
- // We could also detect redundant values.
- // The code could also randomize the order of values, though
- // that's tricky because vldr has a limited reach. Furthermore
- // it breaks load locality.
- ConstantPoolAddEntry(pc_offset(), imm);
- vldr(dst, MemOperand(pc, 0));
} else {
// Synthesise the double from ARM immediates.
uint32_t lo, hi;
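
Note: with the vldr-from-constant-pool path removed, every double immediate that has no short VFP encoding now goes through the "synthesise from ARM immediates" branch. The sketch below (standalone and illustrative; V8 uses its own helper for this) shows the lo/hi split that the following mov/movt + vmov(NeonS32, ...) sequence consumes:

#include <cstdint>
#include <cstring>

// Split a double into the two 32-bit halves of its IEEE-754 image: lo feeds
// lane 0 of the destination D register, hi feeds lane 1.
static void DoubleAsTwoWords(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  *lo = static_cast<uint32_t>(bits);
  *hi = static_cast<uint32_t>(bits >> 32);
}
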
@@ -2861,14 +2873,14 @@ void Assembler::vmov(const DwVfpRegister dst, Double imm,
} else if (extra_scratch == no_reg) {
// We only have one spare scratch register.
mov(scratch, Operand(lo));
- vmov(dst, VmovIndexLo, scratch);
+ vmov(NeonS32, dst, 0, scratch);
if (((lo & 0xFFFF) == (hi & 0xFFFF)) && CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(this, ARMv7);
movt(scratch, hi >> 16);
} else {
mov(scratch, Operand(hi));
}
- vmov(dst, VmovIndexHi, scratch);
+ vmov(NeonS32, dst, 1, scratch);
} else {
// Move the low and high parts of the double to a D register in one
// instruction.
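
Note: the one-scratch path above first loads the low word, then tries to reuse the same scratch register for the high word: on ARMv7, if the two words agree in their low 16 bits, a single movt rewriting only the top half of scratch produces hi. A small illustrative check (not part of the patch) for the double 1.0, whose image is hi = 0x3FF00000, lo = 0x00000000:

#include <cstdint>

constexpr uint32_t kLo = 0x00000000;  // low word of 1.0
constexpr uint32_t kHi = 0x3FF00000;  // high word of 1.0
static_assert((kLo & 0xFFFF) == (kHi & 0xFFFF),
              "movt alone can turn the lo value in scratch into hi");
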
@@ -2909,40 +2921,6 @@ void Assembler::vmov(const DwVfpRegister dst,
}
void Assembler::vmov(const DwVfpRegister dst,
- const VmovIndex index,
- const Register src,
- const Condition cond) {
- // Dd[index] = Rt
- // Instruction details available in ARM DDI 0406C.b, A8-940.
- // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
- // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
- DCHECK(VfpRegisterIsAvailable(dst));
- DCHECK(index.index == 0 || index.index == 1);
- int vd, d;
- dst.split_code(&vd, &d);
- emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
- d*B7 | B4);
-}
-
-
-void Assembler::vmov(const Register dst,
- const VmovIndex index,
- const DwVfpRegister src,
- const Condition cond) {
- // Dd[index] = Rt
- // Instruction details available in ARM DDI 0406C.b, A8.8.342.
- // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
- // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
- DCHECK(VfpRegisterIsAvailable(src));
- DCHECK(index.index == 0 || index.index == 1);
- int vn, n;
- src.split_code(&vn, &n);
- emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
- 0xB*B8 | n*B7 | B4);
-}
-
-
-void Assembler::vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
const Condition cond) {
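
Note: the two lane-indexed vmov() overloads deleted here are the counterparts of the NeonS32 calls introduced earlier in this diff; the same register-to-lane transfer is now requested as vmov(NeonS32, dst, 0 or 1, src). As an illustrative cross-check (not part of the patch), the removed emit() arithmetic for "vmov.32 d0[1], r2" works out to the expected instruction word:

#include <cstdint>

constexpr uint32_t B4 = 1u << 4, B7 = 1u << 7, B8 = 1u << 8;
constexpr uint32_t B12 = 1u << 12, B16 = 1u << 16, B21 = 1u << 21, B24 = 1u << 24;
constexpr uint32_t al = 0xEu << 28;

// vd = 0, d = 0 (register d0), index = 1, src = r2.
constexpr uint32_t kVmovCoreToLane =
    al | 0xE * B24 | 1 * B21 | 0 * B16 | 2 * B12 | 0xB * B8 | 0 * B7 | B4;
static_assert(kVmovCoreToLane == 0xEE202B10, "VMOV.32 d0[1], r2");
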
@@ -5140,14 +5118,17 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
intptr_t value) {
DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::CONST_POOL);
- bool sharing_ok =
- RelocInfo::IsNone(rmode) || RelocInfo::IsShareableRelocMode(rmode);
+ // We can share CODE_TARGETs because we don't patch the code objects anymore,
+ // and we make sure we emit only one reloc info for them (thus delta patching
+ // will apply the delta only once). At the moment, we do not dedup code targets
+ // if they are wrapped in a heap object request (value == 0).
+ bool sharing_ok = RelocInfo::IsShareableRelocMode(rmode) ||
+ (rmode == RelocInfo::CODE_TARGET && value != 0);
DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants);
if (pending_32_bit_constants_.empty()) {
first_const_pool_32_use_ = position;
}
- ConstantPoolEntry entry(
- position, value, sharing_ok || (rmode == RelocInfo::CODE_TARGET), rmode);
+ ConstantPoolEntry entry(position, value, sharing_ok, rmode);
bool shared = false;
if (sharing_ok) {
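
Note: sharing is now decided purely by the reloc mode (plus the non-null-value requirement for CODE_TARGETs); the handle-based dedup map removed in the next hunk is no longer needed. A simplified, illustrative sketch of value-based sharing of pending 32-bit entries (names are made up, not V8's API):

#include <cstdint>
#include <vector>

struct PendingEntry {
  int position;
  std::intptr_t value;
  int merged_index;  // >= 0: reuse the pool slot of that earlier entry
};

// If sharing is allowed and an equal value is already pending, record the
// merge; the new entry still keeps its own use position for back-patching.
static void AddConstant(std::vector<PendingEntry>& pending, int position,
                        std::intptr_t value, bool sharing_ok) {
  PendingEntry entry{position, value, -1};
  if (sharing_ok) {
    for (size_t i = 0; i < pending.size(); ++i) {
      if (pending[i].value == value) {
        entry.merged_index = static_cast<int>(i);
        break;
      }
    }
  }
  pending.push_back(entry);
}
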
@@ -5164,24 +5145,6 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
}
}
- // Share entries if allowed and possible.
- // Null-values are placeholders and must be ignored.
- if (rmode == RelocInfo::CODE_TARGET && value != 0) {
- // Sharing entries here relies on canonicalized handles - without them, we
- // will miss the optimisation opportunity.
- Address handle_address = static_cast<Address>(value);
- auto existing = handle_to_index_map_.find(handle_address);
- if (existing != handle_to_index_map_.end()) {
- int index = existing->second;
- entry.set_merged_index(index);
- shared = true;
- } else {
- // Keep track of this code handle.
- handle_to_index_map_[handle_address] =
- static_cast<int>(pending_32_bit_constants_.size());
- }
- }
-
pending_32_bit_constants_.push_back(entry);
// Make sure the constant pool is not emitted in place of the next
@@ -5194,30 +5157,6 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
}
}
-void Assembler::ConstantPoolAddEntry(int position, Double value) {
- DCHECK_LT(pending_64_bit_constants_.size(), kMaxNumPending64Constants);
- if (pending_64_bit_constants_.empty()) {
- first_const_pool_64_use_ = position;
- }
- ConstantPoolEntry entry(position, value);
-
- // Merge the constant, if possible.
- for (size_t i = 0; i < pending_64_bit_constants_.size(); i++) {
- ConstantPoolEntry& current_entry = pending_64_bit_constants_[i];
- DCHECK(current_entry.sharing_ok());
- if (entry.value() == current_entry.value()) {
- entry.set_merged_index(i);
- break;
- }
- }
- pending_64_bit_constants_.push_back(entry);
-
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolFor(1);
-}
-
-
void Assembler::BlockConstPoolFor(int instructions) {
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
@@ -5419,7 +5358,6 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
pending_32_bit_constants_.clear();
pending_64_bit_constants_.clear();
- handle_to_index_map_.clear();
first_const_pool_32_use_ = -1;
first_const_pool_64_use_ = -1;