Welcome to mirror list, hosted at ThFree Co, Russian Federation.

github.com/nodejs/node.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
path: root/deps
diff options
context:
space:
mode:
authorMichaël Zasso <targos@protonmail.com>2021-04-17 18:34:16 +0300
committerMichaël Zasso <targos@protonmail.com>2021-04-30 13:54:15 +0300
commitfa45d6a3586234588a2c23389d1a3b246c9b74c2 (patch)
treed751d61d79c2c400e4e57868b4c93317a82e1473 /deps
parentc5fe3a226a6d843bd3f5569dd1b6312f183e7d54 (diff)
deps: V8: cherry-pick 93b2105fbe44
Original commit message: [wasm][ia32][liftoff] Implement AtomicCompareExchange As there are not enough registers on ia32 to execute the platform-independent code, the CL also adds ia32-specific code to liftoff-compiler.cc. For this we first retrieve the memory index from the stack, do a bounds check, and calculate the final address. Only afterwards we pop all other values from the stack and pass them to the platform-dependent code. R=clemensb@chromium.org Bug: v8:10108 Change-Id: I741266a9523c8b5c46acc0b29817fd143a75752e Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2316305 Reviewed-by: Clemens Backes <clemensb@chromium.org> Commit-Queue: Andreas Haas <ahaas@chromium.org> Cr-Commit-Position: refs/heads/master@{#69047} Refs: https://github.com/v8/v8/commit/93b2105fbe44c7d1260bbc8b466042624002e9c6 PR-URL: https://github.com/nodejs/node/pull/38275 Reviewed-By: Matteo Collina <matteo.collina@gmail.com> Reviewed-By: Jiawen Geng <technicalcute@gmail.com> Reviewed-By: Shelley Vohr <codebytere@gmail.com>
Diffstat (limited to 'deps')
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h118
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc35
2 files changed, 149 insertions, 4 deletions
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 7a1d629bf2d..b9466195236 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -532,7 +532,123 @@ void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uint32_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
- bailout(kAtomics, "AtomicCompareExchange");
+ // We expect that the offset has already been added to {dst_addr}, and no
+ // {offset_reg} is provided. This is to save registers.
+ DCHECK_EQ(offset_reg, no_reg);
+
+ DCHECK_EQ(result, expected);
+
+ if (type.value() != StoreType::kI64Store) {
+ bool is_64_bit_op = type.value_type() == kWasmI64;
+
+ Register value_reg = is_64_bit_op ? new_value.low_gp() : new_value.gp();
+ Register expected_reg = is_64_bit_op ? expected.low_gp() : expected.gp();
+ Register result_reg = expected_reg;
+
+ bool is_byte_store = type.size() == 1;
+ LiftoffRegList pinned =
+ LiftoffRegList::ForRegs(dst_addr, value_reg, expected_reg);
+
+ // Ensure that {value_reg} is a valid register.
+ if (is_byte_store && !liftoff::kByteRegs.has(value_reg)) {
+ Register safe_value_reg =
+ pinned.set(GetUnusedRegister(liftoff::kByteRegs, pinned)).gp();
+ mov(safe_value_reg, value_reg);
+ value_reg = safe_value_reg;
+ pinned.clear(LiftoffRegister(value_reg));
+ }
+
+ // The cmpxchg instruction uses eax to store the old value of the
+ // compare-exchange primitive. Therefore we have to spill the register and
+ // move any use to another register.
+ ClearRegister(eax, {&dst_addr, &value_reg}, pinned);
+ if (expected_reg != eax) {
+ mov(eax, expected_reg);
+ }
+
+ Operand dst_op = Operand(dst_addr, offset_imm);
+
+ lock();
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8: {
+ cmpxchg_b(dst_op, value_reg);
+ movzx_b(result_reg, eax);
+ break;
+ }
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16: {
+ cmpxchg_w(dst_op, value_reg);
+ movzx_w(result_reg, eax);
+ break;
+ }
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32: {
+ cmpxchg(dst_op, value_reg);
+ if (result_reg != eax) {
+ mov(result_reg, eax);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ if (is_64_bit_op) {
+ xor_(result.high_gp(), result.high_gp());
+ }
+ return;
+ }
+
+ // The following code handles kExprI64AtomicCompareExchange.
+
+ // We need {ebx} here, which is the root register. The root register
+ // needs special treatment. As we use {ebx} directly in the code below, we
+ // have to make sure here that the root register is actually {ebx}.
+ static_assert(kRootRegister == ebx,
+ "The following code assumes that kRootRegister == ebx");
+ push(kRootRegister);
+
+ // The compare-exchange instruction uses registers as follows:
+ // old-value = EDX:EAX; new-value = ECX:EBX.
+ Register expected_hi = edx;
+ Register expected_lo = eax;
+ Register new_hi = ecx;
+ Register new_lo = ebx;
+ // The address needs a separate register that does not alias with the
+ // ones above.
+ Register address = esi;
+
+ // Spill all these registers if they are still holding other values.
+ liftoff::SpillRegisters(this, expected_hi, expected_lo, new_hi, address);
+
+ // We have to set new_lo specially, because it's the root register. We do it
+ // before setting all other registers so that the original value does not get
+ // overwritten.
+ mov(new_lo, new_value.low_gp());
+
+ // Move all other values into the right register.
+ {
+ LiftoffAssembler::ParallelRegisterMoveTuple reg_moves[]{
+ {LiftoffRegister(address), LiftoffRegister(dst_addr), kWasmI32},
+ {LiftoffRegister::ForPair(expected_lo, expected_hi), expected, kWasmI64},
+ {LiftoffRegister(new_hi), new_value.high(), kWasmI32}};
+ ParallelRegisterMove(ArrayVector(reg_moves));
+ };
+
+ Operand dst_op = Operand(address, offset_imm);
+
+ lock();
+ cmpxchg8b(dst_op);
+
+ // Restore the root register, and we are done.
+ pop(kRootRegister);
+
+ // Move the result into the correct registers.
+ {
+ LiftoffAssembler::ParallelRegisterMoveTuple reg_moves[]{
+ {result, LiftoffRegister::ForPair(expected_lo, expected_hi), kWasmI64}};
+ ParallelRegisterMove(ArrayVector(reg_moves));
+ }
}
void LiftoffAssembler::AtomicFence() { mfence(); }
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 4d0d9dbceca..86c33b387ec 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -2879,9 +2879,38 @@ class LiftoffCompiler {
void AtomicCompareExchange(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm) {
#ifdef V8_TARGET_ARCH_IA32
- // With the current implementation we do not have enough registers on ia32
- // to even get to the platform-specific code. Therefore we bailout early.
- unsupported(decoder, kAtomics, "AtomicCompareExchange");
+ // On ia32 we don't have enough registers to first pop all the values off
+ // the stack and then start with the code generation. Instead we do the
+ // complete address calculation first, so that the address only needs a
+ // single register. Afterwards we load all remaining values into the
+ // other registers.
+ LiftoffRegList pinned;
+ Register index_reg = pinned.set(__ PeekToRegister(2, pinned)).gp();
+ if (BoundsCheckMem(decoder, type.size(), imm.offset, index_reg, pinned,
+ kDoForceCheck)) {
+ return;
+ }
+ AlignmentCheckMem(decoder, type.size(), imm.offset, index_reg, pinned);
+
+ uint32_t offset = imm.offset;
+ index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
+ Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ __ emit_i32_add(addr, addr, index_reg);
+ pinned.clear(LiftoffRegister(index_reg));
+ LiftoffRegister new_value = pinned.set(__ PopToRegister(pinned));
+ LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
+
+ // Pop the index from the stack.
+ __ cache_state()->stack_state.pop_back(1);
+
+ LiftoffRegister result = expected;
+
+ // We already added the index to addr, so we can just pass no_reg to the
+ // assembler now.
+ __ AtomicCompareExchange(addr, no_reg, offset, expected, new_value, result,
+ type);
+ __ PushRegister(type.value_type(), result);
return;
#else
ValueType result_type = type.value_type();