github.com/nodejs/node.git

author     Michaël Zasso <targos@protonmail.com>   2021-04-17 18:34:17 +0300
committer  Michaël Zasso <targos@protonmail.com>   2021-04-30 13:54:17 +0300
commit     4d0bc3839ad085b55bcbc4216588098243b37abf (patch)
tree       ee21eac82142c0b3bcff6798c16bc178d20e83c6 /deps
parent     982937893e9643d8bf5b8295d5e26ac9a89a7391 (diff)

deps: V8: cherry-pick 6771d3e31883

Original commit message:

    [wasm][liftoff][cleanup] Remove default parameter of GetUnusedRegister

    This CL removes the default parameter of GetUnusedRegister to avoid
    bugs where the default parameter is used accidentially. With "{}" the
    default value of the parameter is easy to write, and also not much
    more difficult to read.

    R=clemensb@chromium.org

    Bug: v8:10506
    Change-Id: I3debe5eb91578c82abdac81dc6c252435fdf30d6
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2202991
    Reviewed-by: Clemens Backes <clemensb@chromium.org>
    Commit-Queue: Andreas Haas <ahaas@chromium.org>
    Cr-Commit-Position: refs/heads/master@{#67822}

Refs: https://github.com/v8/v8/commit/6771d3e31883b01c948e44a15a8bed1a373f59c8
PR-URL: https://github.com/nodejs/node/pull/38275
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Shelley Vohr <codebytere@gmail.com>
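
For illustration, here is a minimal, standalone C++ sketch of the calling convention this cleanup enforces. The types below (Register, RegList, Assembler) are simplified stand-ins, not V8's real Liftoff classes: once the defaulted pinned parameter is gone, a call site that needs no pinning must spell out "{}", and a call site that needs two distinct scratch registers has to thread a pinned list through both calls, as the ia32 copysign hunks in the diff below do.

// Hypothetical stand-ins for LiftoffRegister / LiftoffRegList /
// LiftoffAssembler; only the calling convention matters here.
#include <cassert>
#include <cstdint>
#include <iostream>

struct Register { int code; };

struct RegList {
  uint32_t bits = 0;
  Register set(Register r) { bits |= 1u << r.code; return r; }
  bool has(Register r) const { return (bits >> r.code) & 1u; }
};

struct Assembler {
  RegList cache_used;  // registers currently holding cached values

  // Post-cleanup signature: no default for {pinned}; callers pass it
  // explicitly (real Liftoff would spill instead of asserting).
  Register GetUnusedRegister(RegList pinned) {
    for (int code = 0; code < 8; ++code) {
      Register r{code};
      if (!cache_used.has(r) && !pinned.has(r)) return r;
    }
    assert(false && "no free register in this toy model");
    return Register{-1};
  }
};

int main() {
  Assembler assm;
  assm.cache_used.set(Register{0});  // pretend r0 holds a cached value

  // Two scratch registers that must differ: pin the first before asking
  // for the second, mirroring the ia32 emit_f32_copysign hunk below.
  RegList pinned;
  Register scratch  = pinned.set(assm.GetUnusedRegister(pinned));
  Register scratch2 = assm.GetUnusedRegister(pinned);
  assert(scratch.code != scratch2.code);

  // A caller that genuinely needs no pinning now has to say so with "{}".
  Register tmp = assm.GetUnusedRegister({});
  std::cout << scratch.code << " " << scratch2.code << " " << tmp.code << "\n";
}
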
Diffstat (limited to 'deps')
-rw-r--r--  deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h        10
-rw-r--r--  deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h      20
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.cc                4
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.h                 4
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.cc                51
-rw-r--r--  deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h       8
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h   8
7 files changed, 53 insertions(+), 52 deletions(-)
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 9aeffbab576..e7f5d82c220 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -437,7 +437,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
vmov(liftoff::GetFloatRegister(reg.fp()), value.to_f32_boxed());
break;
case ValueType::kF64: {
- Register extra_scratch = GetUnusedRegister(kGpReg).gp();
+ Register extra_scratch = GetUnusedRegister(kGpReg, {}).gp();
vmov(reg.fp(), Double(value.to_f64_boxed().get_bits()), extra_scratch);
break;
}
@@ -1173,7 +1173,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
}
@@ -1216,7 +1216,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
// The scratch register will be required by str if multiple instructions
// are required to encode the offset, and so we cannot use it in that case.
if (!ImmediateFitsAddrMode2Instruction(dst.offset())) {
- src = GetUnusedRegister(kGpReg).gp();
+ src = GetUnusedRegister(kGpReg, {}).gp();
} else {
src = temps.Acquire();
}
@@ -1758,7 +1758,7 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
constexpr uint32_t kF32SignBit = uint32_t{1} << 31;
UseScratchRegisterScope temps(this);
- Register scratch = GetUnusedRegister(kGpReg).gp();
+ Register scratch = GetUnusedRegister(kGpReg, {}).gp();
Register scratch2 = temps.Acquire();
VmovLow(scratch, lhs);
// Clear sign bit in {scratch}.
@@ -1777,7 +1777,7 @@ void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
// On arm, we cannot hold the whole f64 value in a gp register, so we just
// operate on the upper half (UH).
UseScratchRegisterScope temps(this);
- Register scratch = GetUnusedRegister(kGpReg).gp();
+ Register scratch = GetUnusedRegister(kGpReg, {}).gp();
Register scratch2 = temps.Acquire();
VmovHigh(scratch, lhs);
// Clear sign bit in {scratch}.
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 830aa7c4b1d..113674a2531 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -130,7 +130,7 @@ inline Register GetTmpByteRegister(LiftoffAssembler* assm, Register candidate) {
if (candidate.is_byte_register()) return candidate;
// {GetUnusedRegister()} may insert move instructions to spill registers to
// the stack. This is OK because {mov} does not change the status flags.
- return assm->GetUnusedRegister(liftoff::kByteRegs).gp();
+ return assm->GetUnusedRegister(liftoff::kByteRegs, {}).gp();
}
inline void MoveStackValue(LiftoffAssembler* assm, const Operand& src,
@@ -1468,7 +1468,7 @@ inline void EmitFloatMinOrMax(LiftoffAssembler* assm, DoubleRegister dst,
// We need one tmp register to extract the sign bit. Get it right at the
// beginning, such that the spilling code is not accidentially jumped over.
- Register tmp = assm->GetUnusedRegister(kGpReg).gp();
+ Register tmp = assm->GetUnusedRegister(kGpReg, {}).gp();
#define dop(name, ...) \
do { \
@@ -1531,9 +1531,9 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
static constexpr int kF32SignBit = 1 << 31;
- Register scratch = GetUnusedRegister(kGpReg).gp();
- Register scratch2 =
- GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(scratch)).gp();
+ LiftoffRegList pinned;
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp();
Movd(scratch, lhs); // move {lhs} into {scratch}.
and_(scratch, Immediate(~kF32SignBit)); // clear sign bit in {scratch}.
Movd(scratch2, rhs); // move {rhs} into {scratch2}.
@@ -1660,9 +1660,9 @@ void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
static constexpr int kF32SignBit = 1 << 31;
// On ia32, we cannot hold the whole f64 value in a gp register, so we just
// operate on the upper half (UH).
- Register scratch = GetUnusedRegister(kGpReg).gp();
- Register scratch2 =
- GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(scratch)).gp();
+ LiftoffRegList pinned;
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp();
Pextrd(scratch, lhs, 1); // move UH of {lhs} into {scratch}.
and_(scratch, Immediate(~kF32SignBit)); // clear sign bit in {scratch}.
@@ -2500,7 +2500,7 @@ void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
static constexpr RegClass tmp_rc = reg_class_for(ValueType::kI32);
- LiftoffRegister tmp = GetUnusedRegister(tmp_rc);
+ LiftoffRegister tmp = GetUnusedRegister(tmp_rc, {});
byte shift = static_cast<byte>(rhs & 0x7);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -3389,7 +3389,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index c505cf7e6d1..e21ee60c0cf 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -542,7 +542,7 @@ LiftoffRegister LiftoffAssembler::LoadI64HalfIntoRegister(VarState slot,
if (slot.is_reg()) {
return half == kLowWord ? slot.reg().low() : slot.reg().high();
}
- LiftoffRegister dst = GetUnusedRegister(kGpReg);
+ LiftoffRegister dst = GetUnusedRegister(kGpReg, {});
if (slot.is_stack()) {
FillI64Half(dst.gp(), slot.offset(), half);
return dst;
@@ -581,7 +581,7 @@ void LiftoffAssembler::PrepareLoopArgs(int num) {
if (!slot.is_const()) continue;
RegClass rc =
kNeedI64RegPair && slot.type() == kWasmI64 ? kGpRegPair : kGpReg;
- LiftoffRegister reg = GetUnusedRegister(rc);
+ LiftoffRegister reg = GetUnusedRegister(rc, {});
LoadConstant(reg, slot.constant());
slot.MakeRegister(reg);
cache_state_.inc_used(reg);
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 3377990496f..701b4b8e6a6 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -340,7 +340,7 @@ class LiftoffAssembler : public TurboAssembler {
// possible.
LiftoffRegister GetUnusedRegister(
RegClass rc, std::initializer_list<LiftoffRegister> try_first,
- LiftoffRegList pinned = {}) {
+ LiftoffRegList pinned) {
for (LiftoffRegister reg : try_first) {
DCHECK_EQ(reg.reg_class(), rc);
if (cache_state_.is_free(reg)) return reg;
@@ -349,7 +349,7 @@ class LiftoffAssembler : public TurboAssembler {
}
// Get an unused register for class {rc}, potentially spilling to free one.
- LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
+ LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned) {
if (kNeedI64RegPair && rc == kGpRegPair) {
LiftoffRegList candidates = kGpCacheRegList;
Register low = pinned.set(GetUnusedRegister(candidates, pinned)).gp();
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 86c33b387ec..071dce0f3de 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -495,7 +495,7 @@ class LiftoffCompiler {
position, __ cache_state()->used_registers,
RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling)));
OutOfLineCode& ool = out_of_line_code_.back();
- Register limit_address = __ GetUnusedRegister(kGpReg).gp();
+ Register limit_address = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize);
__ StackCheck(ool.label.get(), limit_address);
__ bind(ool.continuation.get());
@@ -604,7 +604,7 @@ class LiftoffCompiler {
*next_breakpoint_ptr_ == decoder->position());
if (!has_breakpoint) {
DEBUG_CODE_COMMENT("check hook on function call");
- Register flag = __ GetUnusedRegister(kGpReg).gp();
+ Register flag = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress,
kSystemPointerSize);
Label no_break;
@@ -923,8 +923,8 @@ class LiftoffCompiler {
constexpr RegClass result_rc = reg_class_for(result_type);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {src})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {src}, {})
+ : __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src);
__ PushRegister(ValueType(result_type), dst);
}
@@ -951,8 +951,9 @@ class LiftoffCompiler {
static constexpr RegClass src_rc = reg_class_for(src_type);
static constexpr RegClass dst_rc = reg_class_for(dst_type);
LiftoffRegister src = __ PopToRegister();
- LiftoffRegister dst = src_rc == dst_rc ? __ GetUnusedRegister(dst_rc, {src})
- : __ GetUnusedRegister(dst_rc);
+ LiftoffRegister dst = src_rc == dst_rc
+ ? __ GetUnusedRegister(dst_rc, {src}, {})
+ : __ GetUnusedRegister(dst_rc, {});
DCHECK_EQ(!!can_trap, trap_position > 0);
Label* trap = can_trap ? AddOutOfLineTrap(
trap_position,
@@ -1122,8 +1123,8 @@ class LiftoffCompiler {
LiftoffRegister lhs = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {lhs}, {})
+ : __ GetUnusedRegister(result_rc, {});
CallEmitFn(fnImm, dst, lhs, imm);
__ PushRegister(ValueType(result_type), dst);
@@ -1141,8 +1142,8 @@ class LiftoffCompiler {
LiftoffRegister rhs = __ PopToRegister();
LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs, rhs})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {lhs, rhs}, {})
+ : __ GetUnusedRegister(result_rc, {});
if (swap_lhs_rhs) std::swap(lhs, rhs);
@@ -1483,20 +1484,20 @@ class LiftoffCompiler {
if (value_i32 == value) {
__ PushConstant(kWasmI64, value_i32);
} else {
- LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64));
+ LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64), {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmI64, reg);
}
}
void F32Const(FullDecoder* decoder, Value* result, float value) {
- LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
+ LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF32, reg);
}
void F64Const(FullDecoder* decoder, Value* result, double value) {
- LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
+ LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF64, reg);
}
@@ -1546,7 +1547,7 @@ class LiftoffCompiler {
break;
case kStack: {
auto rc = reg_class_for(imm.type);
- LiftoffRegister reg = __ GetUnusedRegister(rc);
+ LiftoffRegister reg = __ GetUnusedRegister(rc, {});
__ Fill(reg, slot.offset(), imm.type);
__ PushRegister(slot.type(), reg);
break;
@@ -1570,7 +1571,7 @@ class LiftoffCompiler {
}
DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type);
- LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
+ LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {});
__ Fill(dst_reg, src_slot.offset(), type);
*dst_slot = LiftoffAssembler::VarState(type, dst_reg, dst_slot->offset());
__ cache_state()->inc_used(dst_reg);
@@ -1609,7 +1610,7 @@ class LiftoffCompiler {
Register GetGlobalBaseAndOffset(const WasmGlobal* global,
LiftoffRegList* pinned, uint32_t* offset) {
- Register addr = pinned->set(__ GetUnusedRegister(kGpReg)).gp();
+ Register addr = pinned->set(__ GetUnusedRegister(kGpReg, {})).gp();
if (global->mutability && global->imported) {
LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize);
__ Load(LiftoffRegister(addr), addr, no_reg,
@@ -1675,8 +1676,8 @@ class LiftoffCompiler {
DCHECK_EQ(type, __ cache_state()->stack_state.end()[-2].type());
LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister true_value = __ PopToRegister(pinned);
- LiftoffRegister dst =
- __ GetUnusedRegister(true_value.reg_class(), {true_value, false_value});
+ LiftoffRegister dst = __ GetUnusedRegister(true_value.reg_class(),
+ {true_value, false_value}, {});
__ PushRegister(type, dst);
// Now emit the actual code to move either {true_value} or {false_value}
@@ -2075,7 +2076,7 @@ class LiftoffCompiler {
}
void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
- Register mem_size = __ GetUnusedRegister(kGpReg).gp();
+ Register mem_size = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
__ emit_ptrsize_shri(mem_size, mem_size, kWasmPageSizeLog2);
__ PushRegister(kWasmI32, LiftoffRegister(mem_size));
@@ -2344,7 +2345,7 @@ class LiftoffCompiler {
src_rc == result_rc
? __ GetUnusedRegister(result_rc, {src3},
LiftoffRegList::ForRegs(src1, src2))
- : __ GetUnusedRegister(result_rc);
+ : __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src1, src2, src3);
__ PushRegister(ValueType(result_type), dst);
}
@@ -2360,14 +2361,14 @@ class LiftoffCompiler {
int32_t imm = rhs_slot.i32_const();
LiftoffRegister operand = __ PopToRegister();
- LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand});
+ LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});
CallEmitFn(fnImm, dst, operand, imm);
__ PushRegister(kWasmS128, dst);
} else {
LiftoffRegister count = __ PopToRegister();
LiftoffRegister operand = __ PopToRegister();
- LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand});
+ LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});
CallEmitFn(fn, dst, operand, count);
__ PushRegister(kWasmS128, dst);
@@ -2689,8 +2690,8 @@ class LiftoffCompiler {
static constexpr RegClass result_rc = reg_class_for(result_type);
LiftoffRegister lhs = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {lhs}, {})
+ : __ GetUnusedRegister(result_rc, {});
fn(dst, lhs, imm.lane);
__ PushRegister(ValueType(result_type), dst);
}
@@ -2716,7 +2717,7 @@ class LiftoffCompiler {
(src2_rc == result_rc || pin_src2)
? __ GetUnusedRegister(result_rc, {src1},
LiftoffRegList::ForRegs(src2))
- : __ GetUnusedRegister(result_rc, {src1});
+ : __ GetUnusedRegister(result_rc, {src1}, {});
fn(dst, src1, src2, imm.lane);
__ PushRegister(kWasmS128, dst);
}
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index f24c95008c9..8937919c300 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -603,7 +603,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
}
@@ -646,13 +646,13 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
case ValueType::kI32: {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
sw(tmp.gp(), dst);
break;
}
case ValueType::kI64: {
- LiftoffRegister tmp = GetUnusedRegister(kGpRegPair);
+ LiftoffRegister tmp = GetUnusedRegister(kGpRegPair, {});
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
@@ -2251,7 +2251,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 292f8032b8f..e07ed7a393c 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -532,7 +532,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
}
@@ -582,13 +582,13 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
case ValueType::kI32: {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
sw(tmp.gp(), dst);
break;
}
case ValueType::kI64: {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), value.to_i64());
sd(tmp.gp(), dst);
break;
@@ -2197,7 +2197,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}